Diffstat (limited to 'drivers/net/ethernet')
573 files changed, 74725 insertions, 24983 deletions
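Note: many of the PHY-related hunks in the diff below apply the same mechanical conversion, dropping open-coded masking of phydev->supported/advertising in favour of phy_set_max_speed(). The fragment below is only an illustrative sketch of that before/after pattern (it is not copied verbatim from any single driver in this series); all identifiers used (phy_set_max_speed, PHY_BASIC_FEATURES, SPEED_100, SPEED_1000) appear in the hunks themselves.

    /* old style: mask the PHY's supported features by hand */
    phydev->supported &= PHY_BASIC_FEATURES;   /* limit to 10/100 */
    phydev->advertising = phydev->supported;

    /* new style: let phylib clamp both supported and advertised speeds */
    phy_set_max_speed(phydev, SPEED_100);      /* or SPEED_1000 for gigabit-capable MACs */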
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 2a0ddec1dd56..3dcc61821ed5 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -377,9 +377,7 @@ static int ax_mii_probe(struct net_device *dev) return ret; } - /* mask with MAC supported features */ - phy_dev->supported &= PHY_BASIC_FEATURES; - phy_dev->advertising = phy_dev->supported; + phy_set_max_speed(phy_dev, SPEED_100); netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq); diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c index 32e9627e3880..77191a281866 100644 --- a/drivers/net/ethernet/8390/etherh.c +++ b/drivers/net/ethernet/8390/etherh.c @@ -564,26 +564,29 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i sizeof(info->bus_info)); } -static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int etherh_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = etherh_priv(dev)->supported; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = DUPLEX_HALF; - cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; - cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + etherh_priv(dev)->supported); + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_HALF; + cmd->base.port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; + cmd->base.autoneg = (dev->flags & IFF_AUTOMEDIA ? AUTONEG_ENABLE : + AUTONEG_DISABLE); return 0; } -static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int etherh_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { - switch (cmd->autoneg) { + switch (cmd->base.autoneg) { case AUTONEG_ENABLE: dev->flags |= IFF_AUTOMEDIA; break; case AUTONEG_DISABLE: - switch (cmd->port) { + switch (cmd->base.port) { case PORT_TP: dev->if_port = IF_PORT_10BASET; break; @@ -622,12 +625,12 @@ static void etherh_set_msglevel(struct net_device *dev, u32 v) } static const struct ethtool_ops etherh_ethtool_ops = { - .get_settings = etherh_get_settings, - .set_settings = etherh_set_settings, - .get_drvinfo = etherh_get_drvinfo, - .get_ts_info = ethtool_op_get_ts_info, - .get_msglevel = etherh_get_msglevel, - .set_msglevel = etherh_set_msglevel, + .get_drvinfo = etherh_get_drvinfo, + .get_ts_info = ethtool_op_get_ts_info, + .get_msglevel = etherh_get_msglevel, + .set_msglevel = etherh_set_msglevel, + .get_link_ksettings = etherh_get_link_ksettings, + .set_link_ksettings = etherh_set_link_ksettings, }; static const struct net_device_ops etherh_netdev_ops = { diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 6fde68aa13a4..885e00d17807 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -108,6 +108,13 @@ config LANTIQ_ETOP ---help--- Support for the MII0 inside the Lantiq SoC +config LANTIQ_XRX200 + tristate "Lantiq / Intel xRX200 PMAC network driver" + depends on SOC_TYPE_XWAY + ---help--- + Support for the PMAC of the Gigabit switch (GSWIP) inside the + Lantiq / Intel VRX200 VDSL SoC + source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 
b45d5f626b59..7b5bf9682066 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o +obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 4309be3724ad..7c9348a26cbb 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1279,9 +1279,9 @@ static int greth_mdio_probe(struct net_device *dev) } if (greth->gbit_mac) - phy->supported &= PHY_GBIT_FEATURES; + phy_set_max_speed(phy, SPEED_1000); else - phy->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phy, SPEED_100); phy->advertising = phy->supported; diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 48220b6c600d..ea34bcb868b5 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3258,19 +3258,11 @@ static int et131x_mii_probe(struct net_device *netdev) return PTR_ERR(phydev); } - phydev->supported &= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_MII | - SUPPORTED_TP); + phy_set_max_speed(phydev, SPEED_100); if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) - phydev->supported |= SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full; + phy_set_max_speed(phydev, SPEED_1000); - phydev->advertising = phydev->supported; phydev->autoneg = AUTONEG_ENABLE; phy_attached_info(phydev); diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h index d0c388cfd52f..3add305d34b4 100644 --- a/drivers/net/ethernet/alacritech/slic.h +++ b/drivers/net/ethernet/alacritech/slic.h @@ -8,7 +8,6 @@ #include <linux/spinlock_types.h> #include <linux/dma-mapping.h> #include <linux/pci.h> -#include <linux/netdevice.h> #include <linux/list.h> #include <linux/u64_stats_sync.h> diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 3143de45baaa..e1acafa82214 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -172,8 +172,7 @@ static int emac_mdio_probe(struct net_device *dev) } /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); db->link = 0; db->speed = 0; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index baca8f704a45..02921d877c08 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -835,13 +835,10 @@ static int init_phy(struct net_device *dev) } /* Stop Advertising 1000BASE Capability if interface is not GMII - * Note: Checkpatch throws CHECKs for the camel case defines below, - * it's ok to ignore. 
*/ if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) || (priv->phy_iface == PHY_INTERFACE_MODE_RMII)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100); /* Broken HW is sometimes missing the pull-up resistor on the * MDIO line, which results in reads to non-existent devices returning diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig index 99b30353541a..9e87d7b8360f 100644 --- a/drivers/net/ethernet/amazon/Kconfig +++ b/drivers/net/ethernet/amazon/Kconfig @@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON config ENA_ETHERNET tristate "Elastic Network Adapter (ENA) support" - depends on (PCI_MSI && X86) + depends on PCI_MSI && !CPU_BIG_ENDIAN ---help--- This driver supports Elastic Network Adapter (ENA)" diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index 4532e574ebcd..9f80b73f90b1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -32,115 +32,81 @@ #ifndef _ENA_ADMIN_H_ #define _ENA_ADMIN_H_ -enum ena_admin_aq_opcode { - ENA_ADMIN_CREATE_SQ = 1, - - ENA_ADMIN_DESTROY_SQ = 2, - - ENA_ADMIN_CREATE_CQ = 3, - - ENA_ADMIN_DESTROY_CQ = 4, - ENA_ADMIN_GET_FEATURE = 8, - - ENA_ADMIN_SET_FEATURE = 9, - - ENA_ADMIN_GET_STATS = 11, +enum ena_admin_aq_opcode { + ENA_ADMIN_CREATE_SQ = 1, + ENA_ADMIN_DESTROY_SQ = 2, + ENA_ADMIN_CREATE_CQ = 3, + ENA_ADMIN_DESTROY_CQ = 4, + ENA_ADMIN_GET_FEATURE = 8, + ENA_ADMIN_SET_FEATURE = 9, + ENA_ADMIN_GET_STATS = 11, }; enum ena_admin_aq_completion_status { - ENA_ADMIN_SUCCESS = 0, - - ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, - - ENA_ADMIN_BAD_OPCODE = 2, - - ENA_ADMIN_UNSUPPORTED_OPCODE = 3, - - ENA_ADMIN_MALFORMED_REQUEST = 4, - + ENA_ADMIN_SUCCESS = 0, + ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, + ENA_ADMIN_BAD_OPCODE = 2, + ENA_ADMIN_UNSUPPORTED_OPCODE = 3, + ENA_ADMIN_MALFORMED_REQUEST = 4, /* Additional status is provided in ACQ entry extended_status */ - ENA_ADMIN_ILLEGAL_PARAMETER = 5, - - ENA_ADMIN_UNKNOWN_ERROR = 6, + ENA_ADMIN_ILLEGAL_PARAMETER = 5, + ENA_ADMIN_UNKNOWN_ERROR = 6, + ENA_ADMIN_RESOURCE_BUSY = 7, }; enum ena_admin_aq_feature_id { - ENA_ADMIN_DEVICE_ATTRIBUTES = 1, - - ENA_ADMIN_MAX_QUEUES_NUM = 2, - - ENA_ADMIN_HW_HINTS = 3, - - ENA_ADMIN_RSS_HASH_FUNCTION = 10, - - ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, - - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, - - ENA_ADMIN_MTU = 14, - - ENA_ADMIN_RSS_HASH_INPUT = 18, - - ENA_ADMIN_INTERRUPT_MODERATION = 20, - - ENA_ADMIN_AENQ_CONFIG = 26, - - ENA_ADMIN_LINK_CONFIG = 27, - - ENA_ADMIN_HOST_ATTR_CONFIG = 28, - - ENA_ADMIN_FEATURES_OPCODE_NUM = 32, + ENA_ADMIN_DEVICE_ATTRIBUTES = 1, + ENA_ADMIN_MAX_QUEUES_NUM = 2, + ENA_ADMIN_HW_HINTS = 3, + ENA_ADMIN_LLQ = 4, + ENA_ADMIN_RSS_HASH_FUNCTION = 10, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, + ENA_ADMIN_MTU = 14, + ENA_ADMIN_RSS_HASH_INPUT = 18, + ENA_ADMIN_INTERRUPT_MODERATION = 20, + ENA_ADMIN_AENQ_CONFIG = 26, + ENA_ADMIN_LINK_CONFIG = 27, + ENA_ADMIN_HOST_ATTR_CONFIG = 28, + ENA_ADMIN_FEATURES_OPCODE_NUM = 32, }; enum ena_admin_placement_policy_type { /* descriptors and headers are in host memory */ - ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, - + ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, /* descriptors and headers are in device memory (a.k.a Low Latency * Queue) */ - ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, + ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, }; enum ena_admin_link_types { - 
ENA_ADMIN_LINK_SPEED_1G = 0x1, - - ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, - - ENA_ADMIN_LINK_SPEED_5G = 0x4, - - ENA_ADMIN_LINK_SPEED_10G = 0x8, - - ENA_ADMIN_LINK_SPEED_25G = 0x10, - - ENA_ADMIN_LINK_SPEED_40G = 0x20, - - ENA_ADMIN_LINK_SPEED_50G = 0x40, - - ENA_ADMIN_LINK_SPEED_100G = 0x80, - - ENA_ADMIN_LINK_SPEED_200G = 0x100, - - ENA_ADMIN_LINK_SPEED_400G = 0x200, + ENA_ADMIN_LINK_SPEED_1G = 0x1, + ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, + ENA_ADMIN_LINK_SPEED_5G = 0x4, + ENA_ADMIN_LINK_SPEED_10G = 0x8, + ENA_ADMIN_LINK_SPEED_25G = 0x10, + ENA_ADMIN_LINK_SPEED_40G = 0x20, + ENA_ADMIN_LINK_SPEED_50G = 0x40, + ENA_ADMIN_LINK_SPEED_100G = 0x80, + ENA_ADMIN_LINK_SPEED_200G = 0x100, + ENA_ADMIN_LINK_SPEED_400G = 0x200, }; enum ena_admin_completion_policy_type { /* completion queue entry for each sq descriptor */ - ENA_ADMIN_COMPLETION_POLICY_DESC = 0, - + ENA_ADMIN_COMPLETION_POLICY_DESC = 0, /* completion queue entry upon request in sq descriptor */ - ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, - + ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, /* current queue head pointer is updated in OS memory upon sq * descriptor request */ - ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, - + ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, /* current queue head pointer is updated in OS memory for each sq * descriptor */ - ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, + ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, }; /* basic stats return ena_admin_basic_stats while extanded stats return a @@ -148,15 +114,13 @@ enum ena_admin_completion_policy_type { * device id */ enum ena_admin_get_stats_type { - ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, - - ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, + ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, + ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, }; enum ena_admin_get_stats_scope { - ENA_ADMIN_SPECIFIC_QUEUE = 0, - - ENA_ADMIN_ETH_TRAFFIC = 1, + ENA_ADMIN_SPECIFIC_QUEUE = 0, + ENA_ADMIN_ETH_TRAFFIC = 1, }; struct ena_admin_aq_common_desc { @@ -227,7 +191,9 @@ struct ena_admin_acq_common_desc { u16 extended_status; - /* serves as a hint what AQ entries can be revoked */ + /* indicates to the driver which AQ entry has been consumed by the + * device and could be reused + */ u16 sq_head_indx; }; @@ -296,9 +262,8 @@ struct ena_admin_aq_create_sq_cmd { }; enum ena_admin_sq_direction { - ENA_ADMIN_SQ_DIRECTION_TX = 1, - - ENA_ADMIN_SQ_DIRECTION_RX = 2, + ENA_ADMIN_SQ_DIRECTION_TX = 1, + ENA_ADMIN_SQ_DIRECTION_RX = 2, }; struct ena_admin_acq_create_sq_resp_desc { @@ -483,8 +448,85 @@ struct ena_admin_device_attr_feature_desc { u32 max_mtu; }; +enum ena_admin_llq_header_location { + /* header is in descriptor list */ + ENA_ADMIN_INLINE_HEADER = 1, + /* header in a separate ring, implies 16B descriptor list entry */ + ENA_ADMIN_HEADER_RING = 2, +}; + +enum ena_admin_llq_ring_entry_size { + ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1, + ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2, + ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4, +}; + +enum ena_admin_llq_num_descs_before_header { + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8, +}; + +/* packet descriptor list entry always starts with one or more descriptors, + * followed by a header. The rest of the descriptors are located in the + * beginning of the subsequent entry. Stride refers to how the rest of the + * descriptors are placed. 
This field is relevant only for inline header + * mode + */ +enum ena_admin_llq_stride_ctrl { + ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1, + ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2, +}; + +struct ena_admin_feature_llq_desc { + u32 max_llq_num; + + u32 max_llq_depth; + + /* specify the header locations the device supports. bitfield of + * enum ena_admin_llq_header_location. + */ + u16 header_location_ctrl_supported; + + /* the header location the driver selected to use. */ + u16 header_location_ctrl_enabled; + + /* if inline header is specified - this is the size of descriptor + * list entry. If header in a separate ring is specified - this is + * the size of header ring entry. bitfield of enum + * ena_admin_llq_ring_entry_size. specify the entry sizes the device + * supports + */ + u16 entry_size_ctrl_supported; + + /* the entry size the driver selected to use. */ + u16 entry_size_ctrl_enabled; + + /* valid only if inline header is specified. First entry associated + * with the packet includes descriptors and header. Rest of the + * entries occupied by descriptors. This parameter defines the max + * number of descriptors precedding the header in the first entry. + * The field is bitfield of enum + * ena_admin_llq_num_descs_before_header and specify the values the + * device supports + */ + u16 desc_num_before_header_supported; + + /* the desire field the driver selected to use */ + u16 desc_num_before_header_enabled; + + /* valid only if inline was chosen. bitfield of enum + * ena_admin_llq_stride_ctrl + */ + u16 descriptors_stride_ctrl_supported; + + /* the stride control the driver selected to use */ + u16 descriptors_stride_ctrl_enabled; +}; + struct ena_admin_queue_feature_desc { - /* including LLQs */ u32 max_sq_num; u32 max_sq_depth; @@ -493,9 +535,9 @@ struct ena_admin_queue_feature_desc { u32 max_cq_depth; - u32 max_llq_num; + u32 max_legacy_llq_num; - u32 max_llq_depth; + u32 max_legacy_llq_depth; u32 max_header_size; @@ -583,9 +625,8 @@ struct ena_admin_feature_offload_desc { }; enum ena_admin_hash_functions { - ENA_ADMIN_TOEPLITZ = 1, - - ENA_ADMIN_CRC32 = 2, + ENA_ADMIN_TOEPLITZ = 1, + ENA_ADMIN_CRC32 = 2, }; struct ena_admin_feature_rss_flow_hash_control { @@ -611,50 +652,35 @@ struct ena_admin_feature_rss_flow_hash_function { /* RSS flow hash protocols */ enum ena_admin_flow_hash_proto { - ENA_ADMIN_RSS_TCP4 = 0, - - ENA_ADMIN_RSS_UDP4 = 1, - - ENA_ADMIN_RSS_TCP6 = 2, - - ENA_ADMIN_RSS_UDP6 = 3, - - ENA_ADMIN_RSS_IP4 = 4, - - ENA_ADMIN_RSS_IP6 = 5, - - ENA_ADMIN_RSS_IP4_FRAG = 6, - - ENA_ADMIN_RSS_NOT_IP = 7, - + ENA_ADMIN_RSS_TCP4 = 0, + ENA_ADMIN_RSS_UDP4 = 1, + ENA_ADMIN_RSS_TCP6 = 2, + ENA_ADMIN_RSS_UDP6 = 3, + ENA_ADMIN_RSS_IP4 = 4, + ENA_ADMIN_RSS_IP6 = 5, + ENA_ADMIN_RSS_IP4_FRAG = 6, + ENA_ADMIN_RSS_NOT_IP = 7, /* TCPv6 with extension header */ - ENA_ADMIN_RSS_TCP6_EX = 8, - + ENA_ADMIN_RSS_TCP6_EX = 8, /* IPv6 with extension header */ - ENA_ADMIN_RSS_IP6_EX = 9, - - ENA_ADMIN_RSS_PROTO_NUM = 16, + ENA_ADMIN_RSS_IP6_EX = 9, + ENA_ADMIN_RSS_PROTO_NUM = 16, }; /* RSS flow hash fields */ enum ena_admin_flow_hash_fields { /* Ethernet Dest Addr */ - ENA_ADMIN_RSS_L2_DA = BIT(0), - + ENA_ADMIN_RSS_L2_DA = BIT(0), /* Ethernet Src Addr */ - ENA_ADMIN_RSS_L2_SA = BIT(1), - + ENA_ADMIN_RSS_L2_SA = BIT(1), /* ipv4/6 Dest Addr */ - ENA_ADMIN_RSS_L3_DA = BIT(2), - + ENA_ADMIN_RSS_L3_DA = BIT(2), /* ipv4/6 Src Addr */ - ENA_ADMIN_RSS_L3_SA = BIT(3), - + ENA_ADMIN_RSS_L3_SA = BIT(3), /* tcp/udp Dest Port */ - ENA_ADMIN_RSS_L4_DP = BIT(4), - + ENA_ADMIN_RSS_L4_DP = BIT(4), /* tcp/udp Src 
Port */ - ENA_ADMIN_RSS_L4_SP = BIT(5), + ENA_ADMIN_RSS_L4_SP = BIT(5), }; struct ena_admin_proto_input { @@ -693,15 +719,13 @@ struct ena_admin_feature_rss_flow_hash_input { }; enum ena_admin_os_type { - ENA_ADMIN_OS_LINUX = 1, - - ENA_ADMIN_OS_WIN = 2, - - ENA_ADMIN_OS_DPDK = 3, - - ENA_ADMIN_OS_FREEBSD = 4, - - ENA_ADMIN_OS_IPXE = 5, + ENA_ADMIN_OS_LINUX = 1, + ENA_ADMIN_OS_WIN = 2, + ENA_ADMIN_OS_DPDK = 3, + ENA_ADMIN_OS_FREEBSD = 4, + ENA_ADMIN_OS_IPXE = 5, + ENA_ADMIN_OS_ESXI = 6, + ENA_ADMIN_OS_GROUPS_NUM = 6, }; struct ena_admin_host_info { @@ -723,11 +747,27 @@ struct ena_admin_host_info { /* 7:0 : major * 15:8 : minor * 23:16 : sub_minor + * 31:24 : module_type */ u32 driver_version; /* features bitmap */ - u32 supported_network_features[4]; + u32 supported_network_features[2]; + + /* ENA spec version of driver */ + u16 ena_spec_version; + + /* ENA device's Bus, Device and Function + * 2:0 : function + * 7:3 : device + * 15:8 : bus + */ + u16 bdf; + + /* Number of CPUs */ + u16 num_cpus; + + u16 reserved; }; struct ena_admin_rss_ind_table_entry { @@ -800,6 +840,8 @@ struct ena_admin_get_feat_resp { struct ena_admin_device_attr_feature_desc dev_attr; + struct ena_admin_feature_llq_desc llq; + struct ena_admin_queue_feature_desc max_queue; struct ena_admin_feature_aenq_desc aenq; @@ -847,6 +889,9 @@ struct ena_admin_set_feat_cmd { /* rss indirection table */ struct ena_admin_feature_rss_ind_table ind_table; + + /* LLQ configuration */ + struct ena_admin_feature_llq_desc llq; } u; }; @@ -875,25 +920,18 @@ struct ena_admin_aenq_common_desc { /* asynchronous event notification groups */ enum ena_admin_aenq_group { - ENA_ADMIN_LINK_CHANGE = 0, - - ENA_ADMIN_FATAL_ERROR = 1, - - ENA_ADMIN_WARNING = 2, - - ENA_ADMIN_NOTIFICATION = 3, - - ENA_ADMIN_KEEP_ALIVE = 4, - - ENA_ADMIN_AENQ_GROUPS_NUM = 5, + ENA_ADMIN_LINK_CHANGE = 0, + ENA_ADMIN_FATAL_ERROR = 1, + ENA_ADMIN_WARNING = 2, + ENA_ADMIN_NOTIFICATION = 3, + ENA_ADMIN_KEEP_ALIVE = 4, + ENA_ADMIN_AENQ_GROUPS_NUM = 5, }; enum ena_admin_aenq_notification_syndrom { - ENA_ADMIN_SUSPEND = 0, - - ENA_ADMIN_RESUME = 1, - - ENA_ADMIN_UPDATE_HINTS = 2, + ENA_ADMIN_SUSPEND = 0, + ENA_ADMIN_RESUME = 1, + ENA_ADMIN_UPDATE_HINTS = 2, }; struct ena_admin_aenq_entry { @@ -928,27 +966,27 @@ struct ena_admin_ena_mmio_req_read_less_resp { }; /* aq_common_desc */ -#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) -#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) +#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) /* sq */ -#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 -#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) /* acq_common_desc */ -#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) -#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) /* aq_create_sq_cmd */ -#define 
ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) #define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0) /* aq_create_cq_cmd */ @@ -957,12 +995,12 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) /* get_set_feature_common_desc */ -#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) +#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) /* get_feature_link_desc */ -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) /* feature_offload_desc */ #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0) @@ -974,19 +1012,19 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) /* feature_rss_flow_hash_function */ #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0) @@ -994,25 +1032,32 @@ struct ena_admin_ena_mmio_req_read_less_resp { /* feature_rss_flow_hash_input */ #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 -#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) #define 
ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2 -#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1) #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2) /* host_info */ -#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) -#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 -#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) -#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 -#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) +#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) +#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24 +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24) +#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0) +#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3 +#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) +#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) /* aenq_common_desc */ -#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) /* aenq_link_change_desc */ -#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) +#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) #endif /*_ENA_ADMIN_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 7635c38e77dd..420cede41ca4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -41,9 +41,6 @@ #define ENA_ASYNC_QUEUE_DEPTH 16 #define ENA_ADMIN_QUEUE_DEPTH 32 -#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ - ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ - | (ENA_COMMON_SPEC_VERSION_MINOR)) #define ENA_CTRL_MAJOR 0 #define ENA_CTRL_MINOR 0 @@ -61,6 +58,8 @@ #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF +#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 + #define ENA_REGS_ADMIN_INTR_MASK 1 #define ENA_POLL_MS 5 @@ -236,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu tail_masked = admin_queue->sq.tail & queue_size_mask; /* In case of queue FULL */ - cnt = atomic_read(&admin_queue->outstanding_cmds); + cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { pr_debug("admin queue is full.\n"); admin_queue->stats.out_of_space++; @@ -305,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue struct ena_admin_acq_entry *comp, size_t comp_size_in_bytes) { - unsigned long flags; + unsigned long flags = 0; struct ena_comp_ctx *comp_ctx; spin_lock_irqsave(&admin_queue->q_lock, flags); @@ -333,7 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); - io_sq->dma_addr_bits = ena_dev->dma_addr_bits; + io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; io_sq->desc_entry_size = (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
sizeof(struct ena_eth_io_tx_desc) : @@ -355,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, &io_sq->desc_addr.phys_addr, GFP_KERNEL); } - } else { + + if (!io_sq->desc_addr.virt_addr) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + } + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* Allocate bounce buffers */ + io_sq->bounce_buf_ctrl.buffer_size = + ena_dev->llq_info.desc_list_entry_size; + io_sq->bounce_buf_ctrl.buffers_num = + ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; + io_sq->bounce_buf_ctrl.next_to_use = 0; + + size = io_sq->bounce_buf_ctrl.buffer_size * + io_sq->bounce_buf_ctrl.buffers_num; + dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); - io_sq->desc_addr.virt_addr = + io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); set_dev_node(ena_dev->dmadev, dev_node); - if (!io_sq->desc_addr.virt_addr) { - io_sq->desc_addr.virt_addr = + if (!io_sq->bounce_buf_ctrl.base_buffer) + io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + + if (!io_sq->bounce_buf_ctrl.base_buffer) { + pr_err("bounce buffer memory allocation failed"); + return -ENOMEM; } - } - if (!io_sq->desc_addr.virt_addr) { - pr_err("memory allocation failed"); - return -ENOMEM; + memcpy(&io_sq->llq_info, &ena_dev->llq_info, + sizeof(io_sq->llq_info)); + + /* Initiate the first bounce buffer */ + io_sq->llq_buf_ctrl.curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, io_sq->llq_info.desc_list_entry_size); + io_sq->llq_buf_ctrl.descs_left_in_line = + io_sq->llq_info.descs_num_before_header; } io_sq->tail = 0; @@ -460,7 +486,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu /* Go over all the completions */ while ((READ_ONCE(cqe->acq_common_descriptor.flags) & - ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { + ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { /* Do not read the rest of the completion entry before the * phase bit was validated */ @@ -511,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status) static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { - unsigned long flags, timeout; + unsigned long flags = 0; + unsigned long timeout; int ret; timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); @@ -557,10 +584,160 @@ err: return ret; } +/** + * Set the LLQ configurations of the firmware + * + * The driver provides only the enabled feature values to the device, + * which in turn, checks if they are supported. 
+ */ +static int ena_com_set_llq(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; + int ret; + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.feat_common.feature_id = ENA_ADMIN_LLQ; + + cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; + cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; + cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; + cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to set LLQ configurations: %d\n", ret); + + return ret; +} + +static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_cfg) +{ + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; + u16 supported_feat; + int rc; + + memset(llq_info, 0, sizeof(*llq_info)); + + supported_feat = llq_features->header_location_ctrl_supported; + + if (likely(supported_feat & llq_default_cfg->llq_header_location)) { + llq_info->header_location_ctrl = + llq_default_cfg->llq_header_location; + } else { + pr_err("Invalid header location control, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { + supported_feat = llq_features->descriptors_stride_ctrl_supported; + if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { + llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; + } else { + if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { + llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; + } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { + llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; + } else { + pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_stride_ctrl, supported_feat, + llq_info->desc_stride_ctrl); + } + } else { + llq_info->desc_stride_ctrl = 0; + } + + supported_feat = llq_features->entry_size_ctrl_supported; + if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { + llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; + llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; + } else { + if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; + llq_info->desc_list_entry_size = 128; + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; + llq_info->desc_list_entry_size = 192; + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; + llq_info->desc_list_entry_size = 256; + } else { + pr_err("Invalid entry_size_ctrl, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + pr_err("Default llq ring entry size is not supported, performing fallback, 
default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_ring_entry_size, supported_feat, + llq_info->desc_list_entry_size); + } + if (unlikely(llq_info->desc_list_entry_size & 0x7)) { + /* The desc list entry size should be whole multiply of 8 + * This requirement comes from __iowrite64_copy() + */ + pr_err("illegal entry size %d\n", + llq_info->desc_list_entry_size); + return -EINVAL; + } + + if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) + llq_info->descs_per_entry = llq_info->desc_list_entry_size / + sizeof(struct ena_eth_io_tx_desc); + else + llq_info->descs_per_entry = 1; + + supported_feat = llq_features->desc_num_before_header_supported; + if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { + llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; + } else { + if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; + } else { + pr_err("Invalid descs_num_before_header, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_num_decs_before_header, + supported_feat, llq_info->descs_num_before_header); + } + + rc = ena_com_set_llq(ena_dev); + if (rc) + pr_err("Cannot set LLQ configuration: %d\n", rc); + + return 0; +} + static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { - unsigned long flags; + unsigned long flags = 0; int ret; wait_for_completion_timeout(&comp_ctx->wait_event, @@ -606,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = mmio_read->read_resp; u32 mmio_read_reg, ret, i; - unsigned long flags; + unsigned long flags = 0; u32 timeout = mmio_read->reg_read_to; might_sleep(); @@ -728,15 +905,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, if (io_sq->desc_addr.virt_addr) { size = io_sq->desc_entry_size * io_sq->q_depth; - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) - dma_free_coherent(ena_dev->dmadev, size, - io_sq->desc_addr.virt_addr, - io_sq->desc_addr.phys_addr); - else - devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr); + dma_free_coherent(ena_dev->dmadev, size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr); io_sq->desc_addr.virt_addr = NULL; } + + if (io_sq->bounce_buf_ctrl.base_buffer) { + devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer); + io_sq->bounce_buf_ctrl.base_buffer = NULL; + } } static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, @@ -1248,7 +1427,7 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; - unsigned long flags; + unsigned long flags = 0; 
spin_lock_irqsave(&admin_queue->q_lock, flags); while (atomic_read(&admin_queue->outstanding_cmds) != 0) { @@ -1292,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; - unsigned long flags; + unsigned long flags = 0; spin_lock_irqsave(&admin_queue->q_lock, flags); ena_dev->admin_queue.running_state = state; @@ -1326,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) } if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { - pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", + pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", get_resp.u.aenq.supported_groups, groups_flag); return -EOPNOTSUPP; } @@ -1400,11 +1579,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); - if (ver < MIN_ENA_VER) { - pr_err("ENA version is lower than the minimal version the driver supports\n"); - return -1; - } - pr_info("ena controller version: %d.%d.%d implementation version %d\n", (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, @@ -1479,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) sizeof(*mmio_read->read_resp), &mmio_read->read_resp_dma_addr, GFP_KERNEL); if (unlikely(!mmio_read->read_resp)) - return -ENOMEM; + goto err; ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); @@ -1488,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) mmio_read->readless_supported = true; return 0; + +err: + + return -ENOMEM; } void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) @@ -1523,8 +1701,7 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) } int ena_com_admin_init(struct ena_com_dev *ena_dev, - struct ena_aenq_handlers *aenq_handlers, - bool init_spinlock) + struct ena_aenq_handlers *aenq_handlers) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; @@ -1550,8 +1727,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, atomic_set(&admin_queue->outstanding_cmds, 0); - if (init_spinlock) - spin_lock_init(&admin_queue->q_lock); + spin_lock_init(&admin_queue->q_lock); ret = ena_com_init_comp_ctxt(admin_queue); if (ret) @@ -1748,6 +1924,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, else return rc; + rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ); + if (!rc) + memcpy(&get_feat_ctx->llq, &get_resp.u.llq, + sizeof(get_resp.u.llq)); + else if (rc == -EOPNOTSUPP) + memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); + else + return rc; + return 0; } @@ -1779,6 +1964,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) struct ena_admin_aenq_entry *aenq_e; struct ena_admin_aenq_common_desc *aenq_common; struct ena_com_aenq *aenq = &dev->aenq; + unsigned long long timestamp; ena_aenq_handler handler_cb; u16 masked_head, processed = 0; u8 phase; @@ -1796,10 +1982,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) */ dma_rmb(); + timestamp = + (unsigned long long)aenq_common->timestamp_low | + ((unsigned long long)aenq_common->timestamp_high << 32); pr_debug("AENQ! 
Group[%x] Syndrom[%x] timestamp: [%llus]\n", - aenq_common->group, aenq_common->syndrom, - (u64)aenq_common->timestamp_low + - ((u64)aenq_common->timestamp_high << 32)); + aenq_common->group, aenq_common->syndrom, timestamp); /* Handle specific event*/ handler_cb = ena_com_get_specific_aenq_cb(dev, @@ -2441,6 +2628,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) if (unlikely(!host_attr->host_info)) return -ENOMEM; + host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | + (ENA_COMMON_SPEC_VERSION_MINOR)); + return 0; } @@ -2712,3 +2903,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, intr_moder_tbl[level].pkts_per_interval; entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; } + +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_cfg) +{ + int rc; + int size; + + if (!llq_features->max_llq_num) { + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); + if (rc) + return rc; + + /* Validate the descriptor is not too big */ + size = ena_dev->tx_max_header_size; + size += ena_dev->llq_info.descs_num_before_header * + sizeof(struct ena_eth_io_tx_desc); + + if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) { + pr_err("the size of the LLQ entry is smaller than needed\n"); + return -EINVAL; + } + + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; + + return 0; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 7b784f8a06a6..078d6f2b4f39 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -37,6 +37,8 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> +#include <linux/io.h> +#include <linux/prefetch.h> #include <linux/sched.h> #include <linux/sizes.h> #include <linux/spinlock.h> @@ -108,6 +110,14 @@ enum ena_intr_moder_level { ENA_INTR_MAX_NUM_OF_LEVELS, }; +struct ena_llq_configurations { + enum ena_admin_llq_header_location llq_header_location; + enum ena_admin_llq_ring_entry_size llq_ring_entry_size; + enum ena_admin_llq_stride_ctrl llq_stride_ctrl; + enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header; + u16 llq_ring_entry_size_value; +}; + struct ena_intr_moder_entry { unsigned int intr_moder_interval; unsigned int pkts_per_interval; @@ -142,6 +152,15 @@ struct ena_com_tx_meta { u16 l4_hdr_len; /* In words */ }; +struct ena_com_llq_info { + u16 header_location_ctrl; + u16 desc_stride_ctrl; + u16 desc_list_entry_size_ctrl; + u16 desc_list_entry_size; + u16 descs_num_before_header; + u16 descs_per_entry; +}; + struct ena_com_io_cq { struct ena_com_io_desc_addr cdesc_addr; @@ -179,6 +198,20 @@ struct ena_com_io_cq { } ____cacheline_aligned; +struct ena_com_io_bounce_buffer_control { + u8 *base_buffer; + u16 next_to_use; + u16 buffer_size; + u16 buffers_num; /* Must be a power of 2 */ +}; + +/* This struct is to keep tracking the current location of the next llq entry */ +struct ena_com_llq_pkt_ctrl { + u8 *curr_bounce_buf; + u16 idx; + u16 descs_left_in_line; +}; + struct ena_com_io_sq { struct ena_com_io_desc_addr desc_addr; @@ -190,6 +223,9 @@ struct ena_com_io_sq { u32 msix_vector; struct ena_com_tx_meta cached_tx_meta; + struct ena_com_llq_info llq_info; + struct ena_com_llq_pkt_ctrl 
llq_buf_ctrl; + struct ena_com_io_bounce_buffer_control bounce_buf_ctrl; u16 q_depth; u16 qid; @@ -197,6 +233,7 @@ struct ena_com_io_sq { u16 idx; u16 tail; u16 next_to_comp; + u16 llq_last_copy_tail; u32 tx_max_header_size; u8 phase; u8 desc_entry_size; @@ -334,6 +371,8 @@ struct ena_com_dev { u16 intr_delay_resolution; u32 intr_moder_tx_interval; struct ena_intr_moder_entry *intr_moder_tbl; + + struct ena_com_llq_info llq_info; }; struct ena_com_dev_get_features_ctx { @@ -342,6 +381,7 @@ struct ena_com_dev_get_features_ctx { struct ena_admin_feature_aenq_desc aenq; struct ena_admin_feature_offload_desc offload; struct ena_admin_ena_hw_hints hw_hints; + struct ena_admin_feature_llq_desc llq; }; struct ena_com_create_io_ctx { @@ -397,8 +437,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); /* ena_com_admin_init - Init the admin and the async queues * @ena_dev: ENA communication layer struct * @aenq_handlers: Those handlers to be called upon event. - * @init_spinlock: Indicate if this method should init the admin spinlock or - * the spinlock was init before (for example, in a case of FLR). * * Initialize the admin submission and completion queues. * Initialize the asynchronous events notification queues. @@ -406,8 +444,7 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); * @return - 0 on success, negative value on failure. */ int ena_com_admin_init(struct ena_com_dev *ena_dev, - struct ena_aenq_handlers *aenq_handlers, - bool init_spinlock); + struct ena_aenq_handlers *aenq_handlers); /* ena_com_admin_destroy - Destroy the admin and the async events queues. * @ena_dev: ENA communication layer struct @@ -935,6 +972,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, enum ena_intr_moder_level level, struct ena_intr_moder_entry *entry); +/* ena_com_config_dev_mode - Configure the placement policy of the device. + * @ena_dev: ENA communication layer struct + * @llq_features: LLQ feature descriptor, retrieve via + * ena_com_get_dev_attr_feat. 
+ * @ena_llq_config: The default driver LLQ parameters configurations + */ +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_config); + static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) { return ena_dev->adaptive_coalescing; @@ -1044,4 +1091,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg, intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; } +static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl) +{ + u16 size, buffers_num; + u8 *buf; + + size = bounce_buf_ctrl->buffer_size; + buffers_num = bounce_buf_ctrl->buffers_num; + + buf = bounce_buf_ctrl->base_buffer + + (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size; + + prefetchw(bounce_buf_ctrl->base_buffer + + (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size); + + return buf; +} + #endif /* !(ENA_COM) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h index bb8d73676eab..23beb7e7ed7b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h @@ -32,8 +32,8 @@ #ifndef _ENA_COMMON_H_ #define _ENA_COMMON_H_ -#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */ -#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */ +#define ENA_COMMON_SPEC_VERSION_MAJOR 2 +#define ENA_COMMON_SPEC_VERSION_MINOR 0 /* ENA operates with 48-bit memory addresses. ena_mem_addr_t */ struct ena_common_mem_addr { diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 1c682b76190f..f6c2d3855be8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -59,16 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( return cdesc; } -static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) -{ - io_cq->head++; - - /* Switch phase bit in case of wrap around */ - if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) - io_cq->phase ^= 1; -} - -static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) +static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq) { u16 tail_masked; u32 offset; @@ -80,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); } -static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) +static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, + u8 *bounce_buffer) { - u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); - u32 offset = tail_masked * io_sq->desc_entry_size; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; - /* In case this queue isn't a LLQ */ - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) - return; + u16 dst_tail_mask; + u32 dst_offset; - memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset, - io_sq->desc_addr.virt_addr + offset, - io_sq->desc_entry_size); -} + dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); + dst_offset = dst_tail_mask * llq_info->desc_list_entry_size; + + /* Make sure everything was written into the bounce buffer before + * writing the bounce buffer to the device + */ + wmb(); + + /* The line is completed. 
Copy it to dev */ + __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, + bounce_buffer, (llq_info->desc_list_entry_size) / 8); -static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) -{ io_sq->tail++; /* Switch phase bit in case of wrap around */ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) io_sq->phase ^= 1; + + return 0; } -static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, - u8 *head_src, u16 header_len) +static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, + u8 *header_src, + u16 header_len) { - u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); - u8 __iomem *dev_head_addr = - io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size); + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf; + u16 header_offset; - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) return 0; - if (unlikely(!io_sq->header_addr)) { - pr_err("Push buffer header ptr is NULL\n"); - return -EINVAL; + header_offset = + llq_info->descs_num_before_header * io_sq->desc_entry_size; + + if (unlikely((header_offset + header_len) > + llq_info->desc_list_entry_size)) { + pr_err("trying to write header larger than llq entry can accommodate\n"); + return -EFAULT; + } + + if (unlikely(!bounce_buffer)) { + pr_err("bounce buffer is NULL\n"); + return -EFAULT; + } + + memcpy(bounce_buffer + header_offset, header_src, header_len); + + return 0; +} + +static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + u8 *bounce_buffer; + void *sq_desc; + + bounce_buffer = pkt_ctrl->curr_bounce_buf; + + if (unlikely(!bounce_buffer)) { + pr_err("bounce buffer is NULL\n"); + return NULL; + } + + sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size; + pkt_ctrl->idx++; + pkt_ctrl->descs_left_in_line--; + + return sq_desc; +} + +static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + int rc; + + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) + return 0; + + /* bounce buffer was used, so write it and get a new one */ + if (pkt_ctrl->idx) { + rc = ena_com_write_bounce_buffer_to_dev(io_sq, + pkt_ctrl->curr_bounce_buf); + if (unlikely(rc)) + return rc; + + pkt_ctrl->curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, llq_info->desc_list_entry_size); + } + + pkt_ctrl->idx = 0; + pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header; + return 0; +} + +static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) +{ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + return get_sq_desc_llq(io_sq); + + return get_sq_desc_regular_queue(io_sq); +} + +static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + int rc; + + if (!pkt_ctrl->descs_left_in_line) { + rc = ena_com_write_bounce_buffer_to_dev(io_sq, + pkt_ctrl->curr_bounce_buf); + if (unlikely(rc)) + return rc; + + pkt_ctrl->curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + 
memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, llq_info->desc_list_entry_size); + + pkt_ctrl->idx = 0; + if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)) + pkt_ctrl->descs_left_in_line = 1; + else + pkt_ctrl->descs_left_in_line = + llq_info->desc_list_entry_size / io_sq->desc_entry_size; } - memcpy_toio(dev_head_addr, head_src, header_len); + return 0; +} + +static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) +{ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + return ena_com_sq_update_llq_tail(io_sq); + + io_sq->tail++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) + io_sq->phase ^= 1; return 0; } @@ -186,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, return false; } -static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, - struct ena_com_tx_ctx *ena_tx_ctx) +static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) { struct ena_eth_io_tx_meta_desc *meta_desc = NULL; struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; @@ -232,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i memcpy(&io_sq->cached_tx_meta, ena_meta, sizeof(struct ena_com_tx_meta)); - ena_com_copy_curr_sq_desc_to_dev(io_sq); - ena_com_sq_update_tail(io_sq); + return ena_com_sq_update_tail(io_sq); } static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, @@ -245,11 +349,14 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; ena_rx_ctx->l3_csum_err = - (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> - ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT); ena_rx_ctx->l4_csum_err = - (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> - ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT); + ena_rx_ctx->l4_csum_checked = + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT); ena_rx_ctx->hash = cdesc->hash; ena_rx_ctx->frag = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> @@ -271,18 +378,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, { struct ena_eth_io_tx_desc *desc = NULL; struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs; - void *push_header = ena_tx_ctx->push_header; + void *buffer_to_push = ena_tx_ctx->push_header; u16 header_len = ena_tx_ctx->header_len; u16 num_bufs = ena_tx_ctx->num_bufs; - int total_desc, i, rc; + u16 start_tail = io_sq->tail; + int i, rc; bool have_meta; u64 addr_hi; WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type"); /* num_bufs +1 for potential meta desc */ - if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) { - pr_err("Not enough space in the tx queue\n"); + if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { + pr_debug("Not enough space in the tx queue\n"); return -ENOMEM; } @@ -292,23 +400,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, return -EINVAL; } - /* start with pushing the header (if needed) */ - rc = ena_com_write_header(io_sq, push_header, header_len); + if 
(unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && + !buffer_to_push)) + return -EINVAL; + + rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len); if (unlikely(rc)) return rc; have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq, ena_tx_ctx); - if (have_meta) - ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); + if (have_meta) { + rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); + if (unlikely(rc)) + return rc; + } - /* If the caller doesn't want send packets */ + /* If the caller doesn't want to send packets */ if (unlikely(!num_bufs && !header_len)) { - *nb_hw_desc = have_meta ? 0 : 1; - return 0; + rc = ena_com_close_bounce_buffer(io_sq); + *nb_hw_desc = io_sq->tail - start_tail; + return rc; } desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return -EFAULT; memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); /* Set first desc when we don't have meta descriptor */ @@ -360,10 +477,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, for (i = 0; i < num_bufs; i++) { /* The first desc share the same desc as the header */ if (likely(i != 0)) { - ena_com_copy_curr_sq_desc_to_dev(io_sq); - ena_com_sq_update_tail(io_sq); + rc = ena_com_sq_update_tail(io_sq); + if (unlikely(rc)) + return rc; desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return -EFAULT; + memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); desc->len_ctrl |= (io_sq->phase << @@ -386,15 +507,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, /* set the last desc indicator */ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK; - ena_com_copy_curr_sq_desc_to_dev(io_sq); - - ena_com_sq_update_tail(io_sq); + rc = ena_com_sq_update_tail(io_sq); + if (unlikely(rc)) + return rc; - total_desc = max_t(u16, num_bufs, 1); - total_desc += have_meta ? 1 : 0; + rc = ena_com_close_bounce_buffer(io_sq); - *nb_hw_desc = total_desc; - return 0; + *nb_hw_desc = io_sq->tail - start_tail; + return rc; } int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, @@ -453,15 +573,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); - if (unlikely(ena_com_sq_empty_space(io_sq) == 0)) + if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1))) return -ENOSPC; desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return -EFAULT; + memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); desc->length = ena_buf->len; - desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK; + desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK; desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK; desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK; desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; @@ -472,43 +595,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, desc->buff_addr_hi = ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); - ena_com_sq_update_tail(io_sq); - - return 0; -} - -int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) -{ - u8 expected_phase, cdesc_phase; - struct ena_eth_io_tx_cdesc *cdesc; - u16 masked_head; - - masked_head = io_cq->head & (io_cq->q_depth - 1); - expected_phase = io_cq->phase; - - cdesc = (struct ena_eth_io_tx_cdesc *) - ((uintptr_t)io_cq->cdesc_addr.virt_addr + - (masked_head * io_cq->cdesc_entry_size_in_bytes)); - - /* When the current completion descriptor phase isn't the same as the - * expected, it mean that the device still didn't update - * this completion. 
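/*
 * Sketch with a hypothetical helper, not the driver's API: ena_com_prepare_tx()
 * above now reports the number of hardware descriptors it consumed as the
 * difference between the tail sampled before and after the submission
 * (*nb_hw_desc = io_sq->tail - start_tail). Unsigned 16-bit subtraction keeps
 * that count correct even when the free-running tail wraps between samples.
 */
#include <stdint.h>

static uint16_t descs_consumed(uint16_t start_tail, uint16_t end_tail)
{
	return (uint16_t)(end_tail - start_tail);	/* wrap-safe */
}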
- */ - cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; - if (cdesc_phase != expected_phase) - return -EAGAIN; - - dma_rmb(); - if (unlikely(cdesc->req_id >= io_cq->q_depth)) { - pr_err("Invalid req id %d\n", cdesc->req_id); - return -EINVAL; - } - - ena_com_cq_inc_head(io_cq); - - *req_id = READ_ONCE(cdesc->req_id); - - return 0; + return ena_com_sq_update_tail(io_sq); } bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 2f7657227cfe..340d02b64ca6 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -67,6 +67,7 @@ struct ena_com_rx_ctx { enum ena_eth_io_l4_proto_index l4_proto; bool l3_csum_err; bool l4_csum_err; + u8 l4_csum_checked; /* fragmented packet */ bool frag; u32 hash; @@ -86,8 +87,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, struct ena_com_buf *ena_buf, u16 req_id); -int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id); - bool ena_com_cq_empty(struct ena_com_io_cq *io_cq); static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, @@ -96,7 +95,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, writel(intr_reg->intr_control, io_cq->unmask_reg); } -static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) +static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq) { u16 tail, next_to_comp, cnt; @@ -107,11 +106,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) return io_sq->q_depth - 1 - cnt; } -static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) +/* Check if the submission queue has enough space to hold required_buffers */ +static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq, + u16 required_buffers) { - u16 tail; + int temp; - tail = io_sq->tail; + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return ena_com_free_desc(io_sq) >= required_buffers; + + /* This calculation doesn't need to be 100% accurate. So to reduce + * the calculation overhead just Subtract 2 lines from the free descs + * (one for the header line and one to compensate the devision + * down calculation. + */ + temp = required_buffers / io_sq->llq_info.descs_per_entry + 2; + + return ena_com_free_desc(io_sq) > temp; +} + +static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) +{ + u16 tail = io_sq->tail; pr_debug("write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail); @@ -159,4 +175,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem) io_sq->next_to_comp += elem; } +static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) +{ + io_cq->head++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) + io_cq->phase ^= 1; +} + +static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, + u16 *req_id) +{ + u8 expected_phase, cdesc_phase; + struct ena_eth_io_tx_cdesc *cdesc; + u16 masked_head; + + masked_head = io_cq->head & (io_cq->q_depth - 1); + expected_phase = io_cq->phase; + + cdesc = (struct ena_eth_io_tx_cdesc *) + ((uintptr_t)io_cq->cdesc_addr.virt_addr + + (masked_head * io_cq->cdesc_entry_size_in_bytes)); + + /* When the current completion descriptor phase isn't the same as the + * expected, it mean that the device still didn't update + * this completion. 
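/*
 * Sketch of the space accounting behind ena_com_free_desc() and
 * ena_com_sq_have_enough_space() above, as a plain C model with hypothetical
 * names. It assumes q_depth is a power of two and that one slot is kept
 * unused so a full ring is distinguishable from an empty one.
 */
#include <stdbool.h>
#include <stdint.h>

struct sq_space {
	uint16_t tail;			/* producer index */
	uint16_t next_to_comp;		/* consumer index */
	uint16_t q_depth;
	bool	 dev_placement;		/* LLQ mode vs. host-memory mode */
	uint16_t descs_per_entry;	/* descriptors per LLQ line */
};

static int free_desc(const struct sq_space *sq)
{
	uint16_t in_flight = sq->tail - sq->next_to_comp;

	return sq->q_depth - 1 - in_flight;
}

static bool have_enough_space(const struct sq_space *sq, uint16_t required)
{
	if (!sq->dev_placement)
		return free_desc(sq) >= required;

	/*
	 * LLQ lines hold several descriptors; the "+ 2" pads for the header
	 * line and for the integer division rounding down, so the check stays
	 * cheap without having to be exact.
	 */
	return free_desc(sq) > required / sq->descs_per_entry + 2;
}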
+ */ + cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + if (cdesc_phase != expected_phase) + return -EAGAIN; + + dma_rmb(); + + *req_id = READ_ONCE(cdesc->req_id); + if (unlikely(*req_id >= io_cq->q_depth)) { + pr_err("Invalid req id %d\n", cdesc->req_id); + return -EINVAL; + } + + ena_com_cq_inc_head(io_cq); + + return 0; +} + #endif /* ENA_ETH_COM_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h index f320c58793a5..00e0f056a741 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h @@ -33,25 +33,18 @@ #define _ENA_ETH_IO_H_ enum ena_eth_io_l3_proto_index { - ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, - - ENA_ETH_IO_L3_PROTO_IPV4 = 8, - - ENA_ETH_IO_L3_PROTO_IPV6 = 11, - - ENA_ETH_IO_L3_PROTO_FCOE = 21, - - ENA_ETH_IO_L3_PROTO_ROCE = 22, + ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L3_PROTO_IPV4 = 8, + ENA_ETH_IO_L3_PROTO_IPV6 = 11, + ENA_ETH_IO_L3_PROTO_FCOE = 21, + ENA_ETH_IO_L3_PROTO_ROCE = 22, }; enum ena_eth_io_l4_proto_index { - ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, - - ENA_ETH_IO_L4_PROTO_TCP = 12, - - ENA_ETH_IO_L4_PROTO_UDP = 13, - - ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, + ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L4_PROTO_TCP = 12, + ENA_ETH_IO_L4_PROTO_UDP = 13, + ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, }; struct ena_eth_io_tx_desc { @@ -242,9 +235,13 @@ struct ena_eth_io_rx_cdesc_base { * checksum error detected, or, the controller didn't * validate the checksum. This bit is valid only when * l4_proto_idx indicates TCP/UDP packet, and, - * ipv4_frag is not set + * ipv4_frag is not set. This bit is valid only when + * l4_csum_checked below is set. * 15 : ipv4_frag - Indicates IPv4 fragmented packet - * 23:16 : reserved16 + * 16 : l4_csum_checked - L4 checksum was verified + * (could be OK or error), when cleared the status of + * checksum is unknown + * 23:17 : reserved17 - MBZ * 24 : phase * 25 : l3_csum2 - second checksum engine result * 26 : first - Indicates first descriptor in @@ -303,114 +300,116 @@ struct ena_eth_io_numa_node_cfg_reg { }; /* tx_desc */ -#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) -#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 -#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) -#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 -#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) -#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 -#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) -#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 -#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) -#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 -#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) -#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 -#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) -#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) -#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 -#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) -#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 -#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) -#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 -#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) -#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 -#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) -#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 -#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) -#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 -#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) -#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 -#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) 
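/*
 * Sketch of the completion-polling pattern used by the relocated
 * ena_com_tx_comp_req_id_get() above, as a plain C model: the phase bit says
 * whether the entry at the masked head was written by the device during the
 * current lap. Types and names are hypothetical, and the DMA read barrier of
 * the real code is only noted in a comment.
 */
#include <errno.h>
#include <stdint.h>

struct tx_cdesc_model {
	uint16_t req_id;
	uint8_t  flags;		/* bit 0 carries the phase */
};

struct cq_model {
	struct tx_cdesc_model *ring;
	uint16_t head;		/* free-running consumer index */
	uint16_t q_depth;	/* power of two */
	uint8_t  phase;		/* phase expected for the next valid entry */
};

static int tx_comp_req_id_get(struct cq_model *cq, uint16_t *req_id)
{
	struct tx_cdesc_model *cdesc = &cq->ring[cq->head & (cq->q_depth - 1)];

	if ((cdesc->flags & 1) != cq->phase)
		return -EAGAIN;		/* device has not written it yet */

	/* the driver issues dma_rmb() here before trusting the payload */
	*req_id = cdesc->req_id;
	if (*req_id >= cq->q_depth)
		return -EINVAL;		/* corrupt completion; caller resets */

	cq->head++;
	if ((cq->head & (cq->q_depth - 1)) == 0)
		cq->phase ^= 1;		/* wrapped: expect the other phase */

	return 0;
}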
-#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 -#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) -#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) -#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 -#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) +#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) +#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 +#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) +#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 +#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) /* tx_meta_desc */ -#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) -#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 -#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) -#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 -#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) -#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 -#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) -#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 -#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) -#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 -#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) -#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 -#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) -#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 -#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) -#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 -#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) -#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) -#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 -#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) -#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 -#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) +#define 
ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) +#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 +#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) +#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) /* tx_cdesc */ -#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) /* rx_desc */ -#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) -#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 -#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) -#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 -#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) -#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 -#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) +#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 +#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) +#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 +#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) +#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 +#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) /* rx_cdesc_base */ -#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) -#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 -#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) -#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 -#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) -#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 -#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) -#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 -#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) -#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 -#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) -#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 -#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) -#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 -#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) 
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16) +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) /* intr_reg */ -#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) -#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 -#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) -#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 -#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) +#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) /* numa_node_cfg_reg */ -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) #endif /*_ENA_ETH_IO_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 521607bc4393..f3a5a384e6e8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(doorbells), ENA_STAT_TX_ENTRY(prepare_ctx_err), ENA_STAT_TX_ENTRY(bad_req_id), + ENA_STAT_TX_ENTRY(llq_buffer_copy), ENA_STAT_TX_ENTRY(missed_tx), }; @@ -96,6 +97,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(rx_copybreak_pkt), ENA_STAT_RX_ENTRY(bad_req_id), ENA_STAT_RX_ENTRY(empty_rx_ring), + ENA_STAT_RX_ENTRY(csum_unchecked), }; static const struct ena_stats ena_stats_ena_com_strings[] = { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 25621a218f20..18956e7604a3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -39,7 +39,6 @@ #include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/moduleparam.h> #include <linux/numa.h> #include <linux/pci.h> #include <linux/utsname.h> @@ -238,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) } } + size = tx_ring->tx_max_header_size; + tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); + if (!tx_ring->push_buf_intermediate_buf) { + 
tx_ring->push_buf_intermediate_buf = vzalloc(size); + if (!tx_ring->push_buf_intermediate_buf) { + vfree(tx_ring->tx_buffer_info); + vfree(tx_ring->free_tx_ids); + return -ENOMEM; + } + } + /* Req id ring for TX out of order completions */ for (i = 0; i < tx_ring->ring_size; i++) tx_ring->free_tx_ids[i] = i; @@ -266,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) vfree(tx_ring->free_tx_ids); tx_ring->free_tx_ids = NULL; + + vfree(tx_ring->push_buf_intermediate_buf); + tx_ring->push_buf_intermediate_buf = NULL; } /* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues @@ -603,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter) ena_free_rx_bufs(adapter, i); } +static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info) +{ + struct ena_com_buf *ena_buf; + u32 cnt; + int i; + + ena_buf = tx_info->bufs; + cnt = tx_info->num_of_bufs; + + if (unlikely(!cnt)) + return; + + if (tx_info->map_linear_data) { + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), + DMA_TO_DEVICE); + ena_buf++; + cnt--; + } + + /* unmap remaining mapped pages */ + for (i = 0; i < cnt; i++) { + dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); + ena_buf++; + } +} + /* ena_free_tx_bufs - Free Tx Buffers per Queue * @tx_ring: TX ring for which buffers be freed */ @@ -613,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) for (i = 0; i < tx_ring->ring_size; i++) { struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; - struct ena_com_buf *ena_buf; - int nr_frags; - int j; if (!tx_info->skb) continue; @@ -631,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) tx_ring->qid, i); } - ena_buf = tx_info->bufs; - dma_unmap_single(tx_ring->dev, - ena_buf->paddr, - ena_buf->len, - DMA_TO_DEVICE); - - /* unmap remaining mapped pages */ - nr_frags = tx_info->num_of_bufs - 1; - for (j = 0; j < nr_frags; j++) { - ena_buf++; - dma_unmap_page(tx_ring->dev, - ena_buf->paddr, - ena_buf->len, - DMA_TO_DEVICE); - } + ena_unmap_tx_skb(tx_ring, tx_info); dev_kfree_skb_any(tx_info->skb); } @@ -736,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) while (tx_pkts < budget) { struct ena_tx_buffer *tx_info; struct sk_buff *skb; - struct ena_com_buf *ena_buf; - int i, nr_frags; rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); @@ -757,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) tx_info->skb = NULL; tx_info->last_jiffies = 0; - if (likely(tx_info->num_of_bufs != 0)) { - ena_buf = tx_info->bufs; - - dma_unmap_single(tx_ring->dev, - dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), - DMA_TO_DEVICE); - - /* unmap remaining mapped pages */ - nr_frags = tx_info->num_of_bufs - 1; - for (i = 0; i < nr_frags; i++) { - ena_buf++; - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), - DMA_TO_DEVICE); - } - } + ena_unmap_tx_skb(tx_ring, tx_info); netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, "tx_poll: q %d skb %p completed\n", tx_ring->qid, @@ -805,12 +812,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) */ smp_mb(); - above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > - ENA_TX_WAKEUP_THRESH; + above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_WAKEUP_THRESH); if (unlikely(netif_tx_queue_stopped(txq) && 
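/*
 * Sketch of the allocation pattern used above for push_buf_intermediate_buf:
 * prefer memory on the queue's NUMA node, but fall back to any node rather
 * than failing queue setup. vzalloc_node() and vzalloc() are the kernel APIs
 * in use; the wrapper name is hypothetical.
 */
#include <linux/vmalloc.h>

static void *alloc_node_preferred(unsigned long size, int node)
{
	void *buf = vzalloc_node(size, node);	/* node-local, zeroed */

	if (!buf)
		buf = vzalloc(size);		/* any node beats -ENOMEM here */

	return buf;				/* caller still checks for NULL */
}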
above_thresh)) { __netif_tx_lock(txq, smp_processor_id()); - above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > - ENA_TX_WAKEUP_THRESH; + above_thresh = + ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_WAKEUP_THRESH); if (netif_tx_queue_stopped(txq) && above_thresh) { netif_tx_wake_queue(txq); u64_stats_update_begin(&tx_ring->syncp); @@ -986,8 +994,19 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, return; } - skb->ip_summed = CHECKSUM_UNNECESSARY; + if (likely(ena_rx_ctx->l4_csum_checked)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.csum_unchecked++; + u64_stats_update_end(&rx_ring->syncp); + skb->ip_summed = CHECKSUM_NONE; + } + } else { + skb->ip_summed = CHECKSUM_NONE; + return; } + } static void ena_set_rx_hash(struct ena_ring *rx_ring, @@ -1102,8 +1121,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, rx_ring->next_to_clean = next_to_clean; - refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq); - refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; + refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); + refill_threshold = + min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, + ENA_RX_REFILL_THRESH_PACKET); /* Optimization, try to batch new rx buffers */ if (refill_required > refill_threshold) { @@ -1300,7 +1321,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) /* Reserved the max msix vectors we might need */ msix_vecs = ENA_MAX_MSIX_VEC(num_queues); - netif_dbg(adapter, probe, adapter->netdev, "trying to enable MSI-X, vectors %d\n", msix_vecs); @@ -1575,8 +1595,6 @@ static int ena_up_complete(struct ena_adapter *adapter) if (rc) return rc; - ena_init_napi(adapter); - ena_change_mtu(adapter->netdev, adapter->netdev->mtu); ena_refill_all_rx_bufs(adapter); @@ -1593,7 +1611,7 @@ static int ena_up_complete(struct ena_adapter *adapter) static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) { - struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_com_create_io_ctx ctx; struct ena_com_dev *ena_dev; struct ena_ring *tx_ring; u32 msix_vector; @@ -1606,6 +1624,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) msix_vector = ENA_IO_IRQ_IDX(qid); ena_qid = ENA_IO_TXQ_IDX(qid); + memset(&ctx, 0x0, sizeof(ctx)); + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; ctx.qid = ena_qid; ctx.mem_queue_type = ena_dev->tx_mem_queue_type; @@ -1659,7 +1679,7 @@ create_err: static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) { struct ena_com_dev *ena_dev; - struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_com_create_io_ctx ctx; struct ena_ring *rx_ring; u32 msix_vector; u16 ena_qid; @@ -1671,6 +1691,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) msix_vector = ENA_IO_IRQ_IDX(qid); ena_qid = ENA_IO_RXQ_IDX(qid); + memset(&ctx, 0x0, sizeof(ctx)); + ctx.qid = ena_qid; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; @@ -1730,6 +1752,13 @@ static int ena_up(struct ena_adapter *adapter) ena_setup_io_intr(adapter); + /* napi poll functions should be initialized before running + * request_irq(), to handle a rare condition where there is a pending + * interrupt, causing the ISR to fire immediately while the poll + * function wasn't set yet, causing a null dereference + */ + ena_init_napi(adapter); + rc = ena_request_io_irq(adapter); if (rc) goto 
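/*
 * Sketch of the RX checksum decision added above, as a small model with
 * hypothetical names: CHECKSUM_UNNECESSARY is only reported when the device
 * says it actually ran the L4 check (l4_csum_checked) and found no error;
 * otherwise the skb is handed up as CHECKSUM_NONE (counted as csum_unchecked)
 * and the stack verifies it. The real ena_rx_checksum() also screens out IP
 * fragments and L3 checksum errors before reaching this point.
 */
#include <stdbool.h>

enum csum_verdict { CSUM_VERDICT_NONE, CSUM_VERDICT_UNNECESSARY };

static enum csum_verdict rx_csum_decide(bool l4_csum_checked, bool l4_csum_err)
{
	if (!l4_csum_checked)
		return CSUM_VERDICT_NONE;	/* device skipped the check */
	if (l4_csum_err)
		return CSUM_VERDICT_NONE;	/* bad checksum reported */
	return CSUM_VERDICT_UNNECESSARY;
}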
err_req_irq; @@ -1981,73 +2010,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, return rc; } -/* Called with netif_tx_lock. */ -static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int ena_tx_map_skb(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info, + struct sk_buff *skb, + void **push_hdr, + u16 *header_len) { - struct ena_adapter *adapter = netdev_priv(dev); - struct ena_tx_buffer *tx_info; - struct ena_com_tx_ctx ena_tx_ctx; - struct ena_ring *tx_ring; - struct netdev_queue *txq; + struct ena_adapter *adapter = tx_ring->adapter; struct ena_com_buf *ena_buf; - void *push_hdr; - u32 len, last_frag; - u16 next_to_use; - u16 req_id; - u16 push_len; - u16 header_len; dma_addr_t dma; - int qid, rc, nb_hw_desc; - int i = -1; + u32 skb_head_len, frag_len, last_frag; + u16 push_len = 0; + u16 delta = 0; + int i = 0; - netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); - /* Determine which tx ring we will be placed on */ - qid = skb_get_queue_mapping(skb); - tx_ring = &adapter->tx_ring[qid]; - txq = netdev_get_tx_queue(dev, qid); - - rc = ena_check_and_linearize_skb(tx_ring, skb); - if (unlikely(rc)) - goto error_drop_packet; - - skb_tx_timestamp(skb); - len = skb_headlen(skb); - - next_to_use = tx_ring->next_to_use; - req_id = tx_ring->free_tx_ids[next_to_use]; - tx_info = &tx_ring->tx_buffer_info[req_id]; - tx_info->num_of_bufs = 0; - - WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); - ena_buf = tx_info->bufs; + skb_head_len = skb_headlen(skb); tx_info->skb = skb; + ena_buf = tx_info->bufs; if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - /* prepared the push buffer */ - push_len = min_t(u32, len, tx_ring->tx_max_header_size); - header_len = push_len; - push_hdr = skb->data; + /* When the device is LLQ mode, the driver will copy + * the header into the device memory space. + * the ena_com layer assume the header is in a linear + * memory space. + * This assumption might be wrong since part of the header + * can be in the fragmented buffers. + * Use skb_header_pointer to make sure the header is in a + * linear memory space. 
+ */ + + push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); + *push_hdr = skb_header_pointer(skb, 0, push_len, + tx_ring->push_buf_intermediate_buf); + *header_len = push_len; + if (unlikely(skb->data != *push_hdr)) { + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.llq_buffer_copy++; + u64_stats_update_end(&tx_ring->syncp); + + delta = push_len - skb_head_len; + } } else { - push_len = 0; - header_len = min_t(u32, len, tx_ring->tx_max_header_size); - push_hdr = NULL; + *push_hdr = NULL; + *header_len = min_t(u32, skb_head_len, + tx_ring->tx_max_header_size); } - netif_dbg(adapter, tx_queued, dev, + netif_dbg(adapter, tx_queued, adapter->netdev, "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, - push_hdr, push_len); + *push_hdr, push_len); - if (len > push_len) { + if (skb_head_len > push_len) { dma = dma_map_single(tx_ring->dev, skb->data + push_len, - len - push_len, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) + skb_head_len - push_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) goto error_report_dma_error; ena_buf->paddr = dma; - ena_buf->len = len - push_len; + ena_buf->len = skb_head_len - push_len; ena_buf++; tx_info->num_of_bufs++; + tx_info->map_linear_data = 1; + } else { + tx_info->map_linear_data = 0; } last_frag = skb_shinfo(skb)->nr_frags; @@ -2055,18 +2081,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < last_frag; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - len = skb_frag_size(frag); - dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, - DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) + frag_len = skb_frag_size(frag); + + if (unlikely(delta >= frag_len)) { + delta -= frag_len; + continue; + } + + dma = skb_frag_dma_map(tx_ring->dev, frag, delta, + frag_len - delta, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) goto error_report_dma_error; ena_buf->paddr = dma; - ena_buf->len = len; + ena_buf->len = frag_len - delta; ena_buf++; + tx_info->num_of_bufs++; + delta = 0; } - tx_info->num_of_bufs += last_frag; + return 0; + +error_report_dma_error: + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.dma_mapping_err++; + u64_stats_update_end(&tx_ring->syncp); + netdev_warn(adapter->netdev, "failed to map skb\n"); + + tx_info->skb = NULL; + + tx_info->num_of_bufs += i; + ena_unmap_tx_skb(tx_ring, tx_info); + + return -EINVAL; +} + +/* Called with netif_tx_lock. 
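/*
 * Sketch, plain C with hypothetical names, of the header/fragment arithmetic
 * in ena_tx_map_skb() above for LLQ mode: the pushed header is capped at
 * tx_max_header_size and may extend past the skb's linear area, in which case
 * skb_header_pointer() assembles it into the intermediate buffer and "delta"
 * bytes of the first fragment(s) must be skipped when mapping them for DMA.
 */
#include <stdint.h>

struct push_plan {
	uint32_t push_len;	/* bytes copied into device memory */
	uint32_t linear_map;	/* linear bytes still DMA-mapped */
	uint32_t frag_skip;	/* fragment bytes already covered by the push */
};

static struct push_plan plan_push(uint32_t skb_len, uint32_t head_len,
				  uint32_t max_header)
{
	struct push_plan p;

	p.push_len = skb_len < max_header ? skb_len : max_header;
	p.linear_map = head_len > p.push_len ? head_len - p.push_len : 0;
	p.frag_skip = p.push_len > head_len ? p.push_len - head_len : 0;

	return p;
}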
*/ +static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ena_adapter *adapter = netdev_priv(dev); + struct ena_tx_buffer *tx_info; + struct ena_com_tx_ctx ena_tx_ctx; + struct ena_ring *tx_ring; + struct netdev_queue *txq; + void *push_hdr; + u16 next_to_use, req_id, header_len; + int qid, rc, nb_hw_desc; + + netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); + /* Determine which tx ring we will be placed on */ + qid = skb_get_queue_mapping(skb); + tx_ring = &adapter->tx_ring[qid]; + txq = netdev_get_tx_queue(dev, qid); + + rc = ena_check_and_linearize_skb(tx_ring, skb); + if (unlikely(rc)) + goto error_drop_packet; + + skb_tx_timestamp(skb); + + next_to_use = tx_ring->next_to_use; + req_id = tx_ring->free_tx_ids[next_to_use]; + tx_info = &tx_ring->tx_buffer_info[req_id]; + tx_info->num_of_bufs = 0; + + WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); + + rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); + if (unlikely(rc)) + goto error_drop_packet; memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); ena_tx_ctx.ena_bufs = tx_info->bufs; @@ -2082,14 +2165,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, &nb_hw_desc); + /* ena_com_prepare_tx() can't fail due to overflow of tx queue, + * since the number of free descriptors in the queue is checked + * after sending the previous packet. In case there isn't enough + * space in the queue for the next packet, it is stopped + * until there is again enough available space in the queue. + * All other failure reasons of ena_com_prepare_tx() are fatal + * and therefore require a device reset. + */ if (unlikely(rc)) { netif_err(adapter, tx_queued, dev, "failed to prepare tx bufs\n"); u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.queue_stop++; tx_ring->tx_stats.prepare_ctx_err++; u64_stats_update_end(&tx_ring->syncp); - netif_tx_stop_queue(txq); + adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE; + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); goto error_unmap_dma; } @@ -2111,8 +2202,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) * to sgl_size + 2. one for the meta descriptor and one for header * (if the header is larger than tx_max_header_size). 
*/ - if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < - (tx_ring->sgl_size + 2))) { + if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + tx_ring->sgl_size + 2))) { netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", __func__, qid); @@ -2131,8 +2222,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) */ smp_mb(); - if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) - > ENA_TX_WAKEUP_THRESH) { + if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_WAKEUP_THRESH)) { netif_tx_wake_queue(txq); u64_stats_update_begin(&tx_ring->syncp); tx_ring->tx_stats.queue_wakeup++; @@ -2152,35 +2243,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; -error_report_dma_error: - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.dma_mapping_err++; - u64_stats_update_end(&tx_ring->syncp); - netdev_warn(adapter->netdev, "failed to map skb\n"); - - tx_info->skb = NULL; - error_unmap_dma: - if (i >= 0) { - /* save value of frag that failed */ - last_frag = i; - - /* start back at beginning and unmap skb */ - tx_info->skb = NULL; - ena_buf = tx_info->bufs; - dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); - - /* unmap remaining mapped pages */ - for (i = 0; i < last_frag; i++) { - ena_buf++; - dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); - } - } + ena_unmap_tx_skb(tx_ring, tx_info); + tx_info->skb = NULL; error_drop_packet: - dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -2202,7 +2269,8 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, return qid; } -static void ena_config_host_info(struct ena_com_dev *ena_dev) +static void ena_config_host_info(struct ena_com_dev *ena_dev, + struct pci_dev *pdev) { struct ena_admin_host_info *host_info; int rc; @@ -2216,6 +2284,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) host_info = ena_dev->host_attr.host_info; + host_info->bdf = (pdev->bus->number << 8) | pdev->devfn; host_info->os_type = ENA_ADMIN_OS_LINUX; host_info->kernel_ver = LINUX_VERSION_CODE; strncpy(host_info->kernel_ver_str, utsname()->version, @@ -2226,7 +2295,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) host_info->driver_version = (DRV_MODULE_VER_MAJOR) | (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | - (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); + (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) | + ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); + host_info->num_cpus = num_online_cpus(); rc = ena_com_set_host_attributes(ena_dev); if (rc) { @@ -2437,7 +2508,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, } /* ENA admin level init */ - rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); + rc = ena_com_admin_init(ena_dev, &aenq_handlers); if (rc) { dev_err(dev, "Can not initialize ena admin queue with device\n"); @@ -2450,7 +2521,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, */ ena_com_set_admin_polling_mode(ena_dev, true); - ena_config_host_info(ena_dev); + ena_config_host_info(ena_dev, pdev); /* Get Device Attributes*/ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); @@ -2535,15 +2606,14 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); adapter->dev_up_before_reset = 
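/*
 * Sketch of the TX queue stop/wake policy visible above, as two hypothetical
 * predicates: the queue is stopped once it can no longer absorb a worst-case
 * packet (sgl_size fragments plus one meta descriptor plus one extra header
 * descriptor), and it is woken once completions have freed at least the
 * wake-up threshold. The memory barrier and the locked re-check of the real
 * code are omitted here.
 */
#include <stdbool.h>

static bool must_stop_queue(int free_descs, int sgl_size)
{
	return free_descs < sgl_size + 2;
}

static bool may_wake_queue(int free_descs, int wakeup_thresh)
{
	/* wakeup_thresh mirrors ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2) */
	return free_descs >= wakeup_thresh;
}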
dev_up; - if (!graceful) ena_com_set_admin_running_state(ena_dev, false); if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) ena_down(adapter); - /* Before releasing the ENA resources, a device reset is required. - * (to prevent the device from accessing them). + /* Stop the device from sending AENQ events (in case reset flag is set + * and device is up, ena_close already reset the device * In case the reset flag is set and the device is up, ena_down() * already perform the reset, so it can be skipped. */ @@ -2612,14 +2682,20 @@ static int ena_restore_device(struct ena_adapter *adapter) set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); - dev_err(&pdev->dev, "Device reset completed successfully\n"); + dev_err(&pdev->dev, + "Device reset completed successfully, Driver info: %s\n", + version); return rc; err_disable_msix: ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_device_destroy: + ena_com_abort_admin_commands(ena_dev); + ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); + ena_com_mmio_reg_read_request_destroy(ena_dev); + ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); err: clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); @@ -2801,7 +2877,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter) rx_ring = &adapter->rx_ring[i]; refill_required = - ena_com_sq_empty_space(rx_ring->ena_com_io_sq); + ena_com_free_desc(rx_ring->ena_com_io_sq); if (unlikely(refill_required == (rx_ring->ring_size - 1))) { rx_ring->empty_rx_queue++; @@ -2946,20 +3022,10 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, int io_sq_num, io_queue_num; /* In case of LLQ use the llq number in the get feature cmd */ - if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - io_sq_num = get_feat_ctx->max_queues.max_llq_num; - - if (io_sq_num == 0) { - dev_err(&pdev->dev, - "Trying to use LLQ but llq_num is 0. 
Fall back into regular queues\n"); - - ena_dev->tx_mem_queue_type = - ENA_ADMIN_PLACEMENT_POLICY_HOST; - io_sq_num = get_feat_ctx->max_queues.max_sq_num; - } - } else { + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + io_sq_num = get_feat_ctx->llq.max_llq_num; + else io_sq_num = get_feat_ctx->max_queues.max_sq_num; - } io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); io_queue_num = min_t(int, io_queue_num, io_sq_num); @@ -2975,18 +3041,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, return io_queue_num; } -static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, - struct ena_com_dev_get_features_ctx *get_feat_ctx) +static int ena_set_queues_placement_policy(struct pci_dev *pdev, + struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq, + struct ena_llq_configurations *llq_default_configurations) { bool has_mem_bar; + int rc; + u32 llq_feature_mask; + + llq_feature_mask = 1 << ENA_ADMIN_LLQ; + if (!(ena_dev->supported_features & llq_feature_mask)) { + dev_err(&pdev->dev, + "LLQ is not supported Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); - /* Enable push mode if device supports LLQ */ - if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0)) - ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; - else + rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); + if (unlikely(rc)) { + dev_err(&pdev->dev, + "Failed to configure the device mode. Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + /* Nothing to config, exit */ + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return 0; + + if (!has_mem_bar) { + dev_err(&pdev->dev, + "ENA device does not expose LLQ bar. 
Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, ENA_MEM_BAR), + pci_resource_len(pdev, ENA_MEM_BAR)); + + if (!ena_dev->mem_bar) + return -EFAULT; + + return 0; } static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, @@ -3099,18 +3199,20 @@ err_rss_init: static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { - int release_bars; - - if (ena_dev->mem_bar) - devm_iounmap(&pdev->dev, ena_dev->mem_bar); - - if (ena_dev->reg_bar) - devm_iounmap(&pdev->dev, ena_dev->reg_bar); + int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; - release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; pci_release_selected_regions(pdev, release_bars); } +static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config) +{ + llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; + llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; + llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; + llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; + llq_config->llq_ring_entry_size_value = 128; +} + static int ena_calc_queue_size(struct pci_dev *pdev, struct ena_com_dev *ena_dev, u16 *max_tx_sgl_size, @@ -3126,7 +3228,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev, if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) queue_size = min_t(u32, queue_size, - get_feat_ctx->max_queues.max_llq_depth); + get_feat_ctx->llq.max_llq_depth); queue_size = rounddown_pow_of_two(queue_size); @@ -3159,7 +3261,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static int version_printed; struct net_device *netdev; struct ena_adapter *adapter; + struct ena_llq_configurations llq_config; struct ena_com_dev *ena_dev = NULL; + char *queue_type_str; static int adapters_found; int io_queue_num, bars, rc; int queue_size; @@ -3213,16 +3317,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_region; } - ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); + set_default_llq_configurations(&llq_config); - if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, - pci_resource_start(pdev, ENA_MEM_BAR), - pci_resource_len(pdev, ENA_MEM_BAR)); - if (!ena_dev->mem_bar) { - rc = -EFAULT; - goto err_device_destroy; - } + rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, + &llq_config); + if (rc) { + dev_err(&pdev->dev, "ena device init failed\n"); + goto err_device_destroy; } /* initial Tx interrupt delay, Assumes 1 usec granularity. @@ -3237,8 +3338,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_device_destroy; } - dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n", - io_queue_num, queue_size); + dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n", + io_queue_num, queue_size, + (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ? 
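/*
 * Sketch, with hypothetical names, of the decision ladder implemented by
 * ena_set_queues_placement_policy() above: device (LLQ) placement is only
 * kept when the device advertises the LLQ feature, the mode negotiation
 * succeeds and the LLQ memory BAR is actually exposed; every other outcome
 * falls back to host-memory queues instead of failing the probe.
 */
#include <stdbool.h>

enum placement { PLACEMENT_HOST, PLACEMENT_DEV };

static enum placement pick_placement(bool llq_supported, bool llq_config_ok,
				     bool has_mem_bar)
{
	if (!llq_supported || !llq_config_ok || !has_mem_bar)
		return PLACEMENT_HOST;	/* degraded but functional */

	return PLACEMENT_DEV;		/* headers pushed over the WC-mapped BAR */
}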
+ "ENABLED" : "DISABLED"); /* dev zeroed in init_etherdev */ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); @@ -3328,9 +3431,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) timer_setup(&adapter->timer_service, ena_timer_service, 0); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); - dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n", + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + queue_type_str = "Regular"; + else + queue_type_str = "Low Latency"; + + dev_info(&pdev->dev, + "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n", DEVICE_NAME, (long)pci_resource_start(pdev, 0), - netdev->dev_addr, io_queue_num); + netdev->dev_addr, io_queue_num, queue_type_str); set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 7c7ae56c52cf..521873642339 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -43,9 +43,9 @@ #include "ena_com.h" #include "ena_eth_com.h" -#define DRV_MODULE_VER_MAJOR 1 -#define DRV_MODULE_VER_MINOR 5 -#define DRV_MODULE_VER_SUBMINOR 0 +#define DRV_MODULE_VER_MAJOR 2 +#define DRV_MODULE_VER_MINOR 0 +#define DRV_MODULE_VER_SUBMINOR 1 #define DRV_MODULE_NAME "ena" #ifndef DRV_MODULE_VERSION @@ -61,6 +61,17 @@ #define ENA_ADMIN_MSIX_VEC 1 #define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues)) +/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the + * driver passes 0. + * Since the max packet size the ENA handles is ~9kB limit the buffer length to + * 16kB. + */ +#if PAGE_SIZE > SZ_16K +#define ENA_PAGE_SIZE SZ_16K +#else +#define ENA_PAGE_SIZE PAGE_SIZE +#endif + #define ENA_MIN_MSIX_VEC 2 #define ENA_REG_BAR 0 @@ -70,7 +81,7 @@ #define ENA_DEFAULT_RING_SIZE (1024) #define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2) -#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN) +#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN) /* limit the buffer size to 600 bytes to handle MTU changes from very * small to very large, in which case the number of buffers per packet @@ -95,10 +106,11 @@ */ #define ENA_TX_POLL_BUDGET_DIVIDER 4 -/* Refill Rx queue when number of available descriptors is below - * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER +/* Refill Rx queue when number of required descriptors is above + * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER or ENA_RX_REFILL_THRESH_PACKET */ #define ENA_RX_REFILL_THRESH_DIVIDER 8 +#define ENA_RX_REFILL_THRESH_PACKET 256 /* Number of queues to check for missing queues per timer service */ #define ENA_MONITORED_TX_QUEUES 4 @@ -151,6 +163,9 @@ struct ena_tx_buffer { /* num of buffers used by this skb */ u32 num_of_bufs; + /* Indicate if bufs[0] map the linear data of the skb. 
*/ + u8 map_linear_data; + /* Used for detect missing tx packets to limit the number of prints */ u32 print_once; /* Save the last jiffies to detect missing tx packets @@ -186,6 +201,7 @@ struct ena_stats_tx { u64 tx_poll; u64 doorbells; u64 bad_req_id; + u64 llq_buffer_copy; u64 missed_tx; }; @@ -201,6 +217,7 @@ struct ena_stats_rx { u64 rx_copybreak_pkt; u64 bad_req_id; u64 empty_rx_ring; + u64 csum_unchecked; }; struct ena_ring { @@ -257,6 +274,8 @@ struct ena_ring { struct ena_stats_tx tx_stats; struct ena_stats_rx rx_stats; }; + + u8 *push_buf_intermediate_buf; int empty_rx_queue; } ____cacheline_aligned; @@ -355,15 +374,4 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf); int ena_get_sset_count(struct net_device *netdev, int sset); -/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the - * driver passas 0. - * Since the max packet size the ENA handles is ~9kB limit the buffer length to - * 16kB. - */ -#if PAGE_SIZE > SZ_16K -#define ENA_PAGE_SIZE SZ_16K -#else -#define ENA_PAGE_SIZE PAGE_SIZE -#endif - #endif /* !(ENA_H) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 48ca97fbe7bc..04fcafcc059c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -33,137 +33,125 @@ #define _ENA_REGS_H_ enum ena_regs_reset_reason_types { - ENA_REGS_RESET_NORMAL = 0, - - ENA_REGS_RESET_KEEP_ALIVE_TO = 1, - - ENA_REGS_RESET_ADMIN_TO = 2, - - ENA_REGS_RESET_MISS_TX_CMPL = 3, - - ENA_REGS_RESET_INV_RX_REQ_ID = 4, - - ENA_REGS_RESET_INV_TX_REQ_ID = 5, - - ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, - - ENA_REGS_RESET_INIT_ERR = 7, - - ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, - - ENA_REGS_RESET_OS_TRIGGER = 9, - - ENA_REGS_RESET_OS_NETDEV_WD = 10, - - ENA_REGS_RESET_SHUTDOWN = 11, - - ENA_REGS_RESET_USER_TRIGGER = 12, - - ENA_REGS_RESET_GENERIC = 13, - - ENA_REGS_RESET_MISS_INTERRUPT = 14, + ENA_REGS_RESET_NORMAL = 0, + ENA_REGS_RESET_KEEP_ALIVE_TO = 1, + ENA_REGS_RESET_ADMIN_TO = 2, + ENA_REGS_RESET_MISS_TX_CMPL = 3, + ENA_REGS_RESET_INV_RX_REQ_ID = 4, + ENA_REGS_RESET_INV_TX_REQ_ID = 5, + ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, + ENA_REGS_RESET_INIT_ERR = 7, + ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, + ENA_REGS_RESET_OS_TRIGGER = 9, + ENA_REGS_RESET_OS_NETDEV_WD = 10, + ENA_REGS_RESET_SHUTDOWN = 11, + ENA_REGS_RESET_USER_TRIGGER = 12, + ENA_REGS_RESET_GENERIC = 13, + ENA_REGS_RESET_MISS_INTERRUPT = 14, }; /* ena_registers offsets */ -#define ENA_REGS_VERSION_OFF 0x0 -#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 -#define ENA_REGS_CAPS_OFF 0x8 -#define ENA_REGS_CAPS_EXT_OFF 0xc -#define ENA_REGS_AQ_BASE_LO_OFF 0x10 -#define ENA_REGS_AQ_BASE_HI_OFF 0x14 -#define ENA_REGS_AQ_CAPS_OFF 0x18 -#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 -#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 -#define ENA_REGS_ACQ_CAPS_OFF 0x28 -#define ENA_REGS_AQ_DB_OFF 0x2c -#define ENA_REGS_ACQ_TAIL_OFF 0x30 -#define ENA_REGS_AENQ_CAPS_OFF 0x34 -#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 -#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c -#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 -#define ENA_REGS_AENQ_TAIL_OFF 0x44 -#define ENA_REGS_INTR_MASK_OFF 0x4c -#define ENA_REGS_DEV_CTL_OFF 0x54 -#define ENA_REGS_DEV_STS_OFF 0x58 -#define ENA_REGS_MMIO_REG_READ_OFF 0x5c -#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 -#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 + +/* 0 base */ +#define ENA_REGS_VERSION_OFF 0x0 +#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 +#define 
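/*
 * Sketch of the ENA_PAGE_SIZE clamp relocated above: the descriptor length
 * field is 16 bits wide, so a full 64 KiB page would encode as 0; capping the
 * buffer size at 16 KiB keeps the length unambiguous while still covering the
 * ~9 KiB maximum packet the device handles. Plain C model, not the macro
 * itself.
 */
#include <stdint.h>

static uint32_t ena_buffer_len(uint32_t page_size)
{
	const uint32_t cap = 16 * 1024;

	return page_size > cap ? cap : page_size;	/* 65536 -> 16384 */
}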
ENA_REGS_CAPS_OFF 0x8 +#define ENA_REGS_CAPS_EXT_OFF 0xc +#define ENA_REGS_AQ_BASE_LO_OFF 0x10 +#define ENA_REGS_AQ_BASE_HI_OFF 0x14 +#define ENA_REGS_AQ_CAPS_OFF 0x18 +#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 +#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 +#define ENA_REGS_ACQ_CAPS_OFF 0x28 +#define ENA_REGS_AQ_DB_OFF 0x2c +#define ENA_REGS_ACQ_TAIL_OFF 0x30 +#define ENA_REGS_AENQ_CAPS_OFF 0x34 +#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 +#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c +#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 +#define ENA_REGS_AENQ_TAIL_OFF 0x44 +#define ENA_REGS_INTR_MASK_OFF 0x4c +#define ENA_REGS_DEV_CTL_OFF 0x54 +#define ENA_REGS_DEV_STS_OFF 0x58 +#define ENA_REGS_MMIO_REG_READ_OFF 0x5c +#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 +#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 /* version register */ -#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff -#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 -#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 +#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff +#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 +#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 /* controller_version register */ -#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff -#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 -#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 -#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 -#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 -#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 -#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 +#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 /* caps register */ -#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 -#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 -#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e -#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 -#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 -#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 -#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 +#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 /* aq_caps register */ -#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff -#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 -#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 +#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 /* acq_caps register */ -#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff -#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 -#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 +#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 /* aenq_caps register */ -#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff -#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 -#define 
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 +#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 /* dev_ctl register */ -#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 -#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 -#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 -#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 -#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 -#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 -#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 -#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 -#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 +#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 +#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 +#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 +#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 +#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 +#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 +#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 /* dev_sts register */ -#define ENA_REGS_DEV_STS_READY_MASK 0x1 -#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 -#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 -#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 -#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 -#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 -#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 -#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 -#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 -#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 -#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 +#define ENA_REGS_DEV_STS_READY_MASK 0x1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 +#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 +#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 +#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 +#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 /* mmio_reg_read register */ -#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff -#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 -#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 +#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff +#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 +#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 /* rss_ind_entry_update register */ -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 #endif /*_ENA_REGS_H_ */ diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c 
index 01d132c02ff9..265039c57023 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -440,7 +440,7 @@ static void am79c961_timeout(struct net_device *dev) /* * Transmit a packet */ -static int +static netdev_tx_t am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index c5b81268c284..d3d44e07afbc 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -339,7 +339,8 @@ static unsigned long lance_probe1( struct net_device *dev, struct lance_addr *init_rec ); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t lance_interrupt( int irq, void *dev_id ); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -769,7 +770,8 @@ static void lance_tx_timeout (struct net_device *dev) /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t +lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_ioreg *IO = lp->iobase; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 73ca8879ada7..7c1eb304c27e 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -564,17 +564,7 @@ static int au1000_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } - /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); aup->old_link = 0; aup->old_speed = 0; diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 00332a1ea84b..9f23703dd509 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -894,7 +894,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index e248d1ab3e47..8931ce6bab7b 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -435,10 +435,8 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr) } if(cards[i].vendor_id) { for(j=0;j<3;j++) - if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) { + if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) release_region(ioaddr, cards[i].total_size); - continue; - } } break; } diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 77b1db267730..da7e3d4f4166 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -236,7 +236,8 @@ struct lance_private { 
static int lance_probe( struct net_device *dev); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t lance_interrupt( int irq, void *dev_id); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -511,7 +512,8 @@ static void lance_init_ring( struct net_device *dev ) } -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t +lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, len; diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index cdd7a611479b..b4fc0ed5bce8 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1106,7 +1106,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, skblen, len; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 24f1053b8785..d96a84a62d78 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2009,7 +2009,7 @@ static int xgbe_close(struct net_device *netdev) return 0; } -static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; @@ -2018,7 +2018,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) struct xgbe_ring *ring; struct xgbe_packet_data *packet; struct netdev_queue *txq; - int ret; + netdev_tx_t ret; DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 3ceb4f95ca7c..151bdb629e8a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -878,9 +878,10 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) phy_write(phy_data->phydev, 0x04, 0x0d01); phy_write(phy_data->phydev, 0x00, 0x9140); - phy_data->phydev->supported = PHY_GBIT_FEATURES; - phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phy_data->phydev->advertising = phy_data->phydev->supported; + phy_data->phydev->supported = PHY_10BT_FEATURES | + PHY_100BT_FEATURES | + PHY_1000BT_FEATURES; + phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, "Finisar PHY quirk in place\n"); @@ -950,9 +951,10 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) reg = phy_read(phy_data->phydev, 0x00); phy_write(phy_data->phydev, 0x00, reg & ~0x00800); - phy_data->phydev->supported = PHY_GBIT_FEATURES; - phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phy_data->phydev->advertising = phy_data->phydev->supported; + phy_data->phydev->supported = (PHY_10BT_FEATURES | + PHY_100BT_FEATURES | + PHY_1000BT_FEATURES); + phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, "BelFuse PHY quirk in place\n"); @@ -1495,10 +1497,7 @@ static void 
xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) if (!phy_data->phydev) return; - if (phy_data->phydev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (phy_data->phydev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; + lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising); if (phy_data->phydev->pause) { XGBE_SET_LP_ADV(lks, Pause); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c index 4f50f11718f4..78dd09b5beeb 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c @@ -306,45 +306,25 @@ static int xgene_set_pauseparam(struct net_device *ndev, { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; - u32 oldadv, newadv; if (phy_interface_mode_is_rgmii(pdata->phy_mode) || pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { if (!phydev) return -EINVAL; - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - pp->rx_pause != pp->tx_pause)) + if (!phy_validate_pause(phydev, pp)) return -EINVAL; pdata->pause_autoneg = pp->autoneg; pdata->tx_pause = pp->tx_pause; pdata->rx_pause = pp->rx_pause; - oldadv = phydev->advertising; - newadv = oldadv & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause); - if (pp->rx_pause) - newadv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - - if (pp->tx_pause) - newadv ^= ADVERTISED_Asym_Pause; - - if (oldadv ^ newadv) { - phydev->advertising = newadv; - - if (phydev->autoneg) - return phy_start_aneg(phydev); - - if (!pp->autoneg) { - pdata->mac_ops->flowctl_tx(pdata, - pdata->tx_pause); - pdata->mac_ops->flowctl_rx(pdata, - pdata->rx_pause); - } + if (!pp->autoneg) { + pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause); + pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause); } - } else { if (pp->autoneg) return -EINVAL; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 078a04dc1182..e3560311711a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -895,12 +895,10 @@ int xgene_enet_phy_connect(struct net_device *ndev) } pdata->phy_speed = SPEED_UNKNOWN; - phy_dev->supported &= ~SUPPORTED_10baseT_Half & - ~SUPPORTED_100baseT_Half & - ~SUPPORTED_1000baseT_Half; - phy_dev->supported |= SUPPORTED_Pause | - SUPPORTED_Asym_Pause; - phy_dev->advertising = phy_dev->supported; + phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_support_asym_pause(phy_dev); return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index d52b088ff8f0..becb578211ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -57,4 +57,9 @@ #define AQ_NIC_RATE_1G BIT(4) #define AQ_NIC_RATE_100M BIT(5) +#define AQ_NIC_RATE_EEE_10G BIT(6) +#define AQ_NIC_RATE_EEE_5G BIT(7) +#define AQ_NIC_RATE_EEE_2GS BIT(8) +#define AQ_NIC_RATE_EEE_1G BIT(9) + #endif /* AQ_COMMON_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 08c9fa6ca71f..6a633c70f603 100644 --- 
a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -98,8 +98,8 @@ static void aq_ethtool_stats(struct net_device *ndev, struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) + - ARRAY_SIZE(aq_ethtool_queue_stat_names) * - cfg->vecs) * sizeof(u64)); + ARRAY_SIZE(aq_ethtool_queue_stat_names) * + cfg->vecs) * sizeof(u64)); aq_nic_get_stats(aq_nic, data); } @@ -285,6 +285,111 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev, return aq_nic_update_interrupt_moderation_settings(aq_nic); } +static void aq_ethtool_get_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + + if (cfg->wol) + wol->wolopts |= WAKE_MAGIC; +} + +static int aq_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + struct pci_dev *pdev = to_pci_dev(ndev->dev.parent); + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + int err = 0; + + if (wol->wolopts & WAKE_MAGIC) + cfg->wol |= AQ_NIC_WOL_ENABLED; + else + cfg->wol &= ~AQ_NIC_WOL_ENABLED; + err = device_set_wakeup_enable(&pdev->dev, wol->wolopts); + + return err; +} + +static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed) +{ + u32 rate = 0; + + if (speed & AQ_NIC_RATE_EEE_10G) + rate |= SUPPORTED_10000baseT_Full; + + if (speed & AQ_NIC_RATE_EEE_2GS) + rate |= SUPPORTED_2500baseX_Full; + + if (speed & AQ_NIC_RATE_EEE_1G) + rate |= SUPPORTED_1000baseT_Full; + + return rate; +} + +static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 rate, supported_rates; + int err = 0; + + if (!aq_nic->aq_fw_ops->get_eee_rate) + return -EOPNOTSUPP; + + err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate, + &supported_rates); + if (err < 0) + return err; + + eee->supported = eee_mask_to_ethtool_mask(supported_rates); + + if (aq_nic->aq_nic_cfg.eee_speeds) + eee->advertised = eee->supported; + + eee->lp_advertised = eee_mask_to_ethtool_mask(rate); + + eee->eee_enabled = !!eee->advertised; + + eee->tx_lpi_enabled = eee->eee_enabled; + if (eee->advertised & eee->lp_advertised) + eee->eee_active = true; + + return 0; +} + +static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 rate, supported_rates; + struct aq_nic_cfg_s *cfg; + int err = 0; + + cfg = aq_nic_get_cfg(aq_nic); + + if (unlikely(!aq_nic->aq_fw_ops->get_eee_rate || + !aq_nic->aq_fw_ops->set_eee_rate)) + return -EOPNOTSUPP; + + err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate, + &supported_rates); + if (err < 0) + return err; + + if (eee->eee_enabled) { + rate = supported_rates; + cfg->eee_speeds = rate; + } else { + rate = 0; + cfg->eee_speeds = 0; + } + + return aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate); +} + static int aq_ethtool_nway_reset(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); @@ -403,9 +508,13 @@ const struct ethtool_ops aq_ethtool_ops = { .get_drvinfo = aq_ethtool_get_drvinfo, .get_strings = aq_ethtool_get_strings, .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .get_wol = aq_ethtool_get_wol, + .set_wol = aq_ethtool_set_wol, .nway_reset = aq_ethtool_nway_reset, .get_ringparam = aq_get_ringparam, .set_ringparam = aq_set_ringparam, + 
.get_eee = aq_ethtool_get_eee, + .set_eee = aq_ethtool_set_eee, .get_pauseparam = aq_ethtool_get_pauseparam, .set_pauseparam = aq_ethtool_set_pauseparam, .get_rxfh_key_size = aq_ethtool_get_rss_key_size, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 5c00671f248d..e8689241204e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -112,7 +112,7 @@ struct aq_hw_s { const struct aq_fw_ops *aq_fw_ops; void __iomem *mmio; struct aq_hw_link_status_s aq_link_status; - struct hw_aq_atl_utils_mbox mbox; + struct hw_atl_utils_mbox mbox; struct hw_atl_stats_s last_stats; struct aq_stats_s curr_stats; u64 speed; @@ -124,7 +124,7 @@ struct aq_hw_s { u32 mbox_addr; u32 rpc_addr; u32 rpc_tid; - struct hw_aq_atl_utils_fw_rpc rpc; + struct hw_atl_utils_fw_rpc rpc; }; struct aq_ring_s; @@ -204,7 +204,6 @@ struct aq_hw_ops { int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); - int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); }; struct aq_fw_ops { @@ -228,6 +227,14 @@ struct aq_fw_ops { int (*update_stats)(struct aq_hw_s *self); int (*set_flow_control)(struct aq_hw_s *self); + + int (*set_power)(struct aq_hw_s *self, unsigned int power_state, + u8 *mac); + + int (*set_eee_rate)(struct aq_hw_s *self, u32 speed); + + int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate, + u32 *supported_rates); }; #endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 26dc6782b475..5fed24446687 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -189,7 +189,7 @@ static void aq_nic_polling_timer_cb(struct timer_list *t) aq_vec_isr(i, (void *)aq_vec); mod_timer(&self->polling_timer, jiffies + - AQ_CFG_POLLING_TIMER_INTERVAL); + AQ_CFG_POLLING_TIMER_INTERVAL); } int aq_nic_ndev_register(struct aq_nic_s *self) @@ -301,13 +301,13 @@ int aq_nic_start(struct aq_nic_s *self) unsigned int i = 0U; err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, - self->mc_list.ar, - self->mc_list.count); + self->mc_list.ar, + self->mc_list.count); if (err < 0) goto err_exit; err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, - self->packet_filter); + self->packet_filter); if (err < 0) goto err_exit; @@ -327,7 +327,7 @@ int aq_nic_start(struct aq_nic_s *self) goto err_exit; timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); mod_timer(&self->service_timer, jiffies + - AQ_CFG_SERVICE_TIMER_INTERVAL); + AQ_CFG_SERVICE_TIMER_INTERVAL); if (self->aq_nic_cfg.is_polling) { timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0); @@ -344,7 +344,7 @@ int aq_nic_start(struct aq_nic_s *self) } err = self->aq_hw_ops->hw_irq_enable(self->aq_hw, - AQ_CFG_IRQ_MASK); + AQ_CFG_IRQ_MASK); if (err < 0) goto err_exit; } @@ -889,11 +889,13 @@ void aq_nic_deinit(struct aq_nic_s *self) self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) aq_vec_deinit(aq_vec); - if (self->power_state == AQ_HW_POWER_STATE_D0) { - (void)self->aq_fw_ops->deinit(self->aq_hw); - } else { - (void)self->aq_hw_ops->hw_set_power(self->aq_hw, - self->power_state); + self->aq_fw_ops->deinit(self->aq_hw); + + if (self->power_state != AQ_HW_POWER_STATE_D0 || + self->aq_hw->aq_nic_cfg->wol) { + self->aq_fw_ops->set_power(self->aq_hw, + self->power_state, + self->ndev->dev_addr); } err_exit:; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h 
b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index fecfc401f95d..c1582f4e8e1b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -36,6 +36,7 @@ struct aq_nic_cfg_s { u32 flow_control; u32 link_speed_msk; u32 vlan_id; + u32 wol; u16 is_mc_list_enabled; u16 mc_list_count; bool is_autoneg; @@ -44,6 +45,7 @@ struct aq_nic_cfg_s { bool is_lro; u8 tcs; struct aq_rss_parameters aq_rss; + u32 eee_speeds; }; #define AQ_NIC_FLAG_STARTED 0x00000004U @@ -54,6 +56,8 @@ struct aq_nic_cfg_s { #define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U #define AQ_NIC_FLAG_ERR_HW 0x80000000U +#define AQ_NIC_WOL_ENABLED BIT(0) + #define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 750007513f9d..1d5d6b8df855 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -84,7 +84,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev, const struct aq_hw_ops **ops, const struct aq_hw_caps_s **caps) { - int i = 0; + int i; if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA) return -EINVAL; @@ -107,7 +107,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev, int aq_pci_func_init(struct pci_dev *pdev) { - int err = 0; + int err; err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!err) { @@ -141,7 +141,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, char *name, void *aq_vec, cpumask_t *affinity_mask) { struct pci_dev *pdev = self->pdev; - int err = 0; + int err; if (pdev->msix_enabled || pdev->msi_enabled) err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0, @@ -164,7 +164,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, void aq_pci_func_free_irqs(struct aq_nic_s *self) { struct pci_dev *pdev = self->pdev; - unsigned int i = 0U; + unsigned int i; for (i = 32U; i--;) { if (!((1U << i) & self->msix_entry_mask)) @@ -194,8 +194,8 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self) static int aq_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { - struct aq_nic_s *self = NULL; - int err = 0; + struct aq_nic_s *self; + int err; struct net_device *ndev; resource_size_t mmio_pa; u32 bar; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index d1e1a0ba8615..3db91446cc67 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -29,8 +29,8 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, goto err_exit; } self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), - self->size * self->dx_size, - &self->dx_ring_pa, GFP_KERNEL); + self->size * self->dx_size, + &self->dx_ring_pa, GFP_KERNEL); if (!self->dx_ring) { err = -ENOMEM; goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 97addfa6f895..2469ed4d86b9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -49,37 +49,37 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_FIBRE, - .link_speed_msk = HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_5G | + 
AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_A0_RATE_10G | - HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; static int hw_atl_a0_hw_reset(struct aq_hw_s *self) @@ -284,7 +284,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) /* RSS Ring selection */ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ? - 0xB3333333U : 0x00000000U); + 0xB3333333U : 0x00000000U); /* Multicast filters */ for (i = HW_ATL_A0_MAC_MAX; i--;) { @@ -325,7 +325,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) } h = (mac_addr[0] << 8) | (mac_addr[1]); l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | - (mac_addr[4] << 8) | mac_addr[5]; + (mac_addr[4] << 8) | mac_addr[5]; hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); @@ -519,7 +519,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, hw_atl_rdm_rx_desc_data_buff_size_set(self, AQ_CFG_RX_FRAME_MAX / 1024U, - aq_ring->idx); + aq_ring->idx); hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); @@ -758,7 +758,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, hw_atl_rpfl2_uc_flr_en_set(self, (self->aq_nic_cfg->is_mc_list_enabled && (i <= self->aq_nic_cfg->mc_list_count)) ? 
- 1U : 0U, i); + 1U : 0U, i); return aq_hw_err_from_flags(self); } @@ -877,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_a0 = { .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, .hw_init = hw_atl_a0_hw_init, - .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_a0_hw_reset, .hw_start = hw_atl_a0_hw_start, .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 3c94cff57876..a021dc431ef7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -62,12 +62,6 @@ #define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU #define HW_ATL_A0_MPI_SPEED_SHIFT 16U -#define HW_ATL_A0_RATE_10G BIT(0) -#define HW_ATL_A0_RATE_5G BIT(1) -#define HW_ATL_A0_RATE_2G5 BIT(3) -#define HW_ATL_A0_RATE_1G BIT(4) -#define HW_ATL_A0_RATE_100M BIT(5) - #define HW_ATL_A0_TXBUF_MAX 160U #define HW_ATL_A0_RXBUF_MAX 320U diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 1d44a386e7d3..76d25d594a0f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -51,38 +51,38 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_FIBRE, - .link_speed_msk = HW_ATL_B0_RATE_10G | - HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_B0_RATE_10G | - HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; static int hw_atl_b0_hw_reset(struct aq_hw_s *self) @@ -935,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl_b0_hw_init, - .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_b0_hw_reset, .hw_start = hw_atl_b0_hw_start, .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 28568f5fa74b..b318eefd36ae 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -67,12 +67,6 @@ 
#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU #define HW_ATL_B0_MPI_SPEED_SHIFT 16U -#define HW_ATL_B0_RATE_10G BIT(0) -#define HW_ATL_B0_RATE_5G BIT(1) -#define HW_ATL_B0_RATE_2G5 BIT(3) -#define HW_ATL_B0_RATE_1G BIT(4) -#define HW_ATL_B0_RATE_100M BIT(5) - #define HW_ATL_B0_TXBUF_MAX 160U #define HW_ATL_B0_RXBUF_MAX 320U diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 10ba035dadb1..be0a3a90dfad 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1460,3 +1460,11 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp), glb_cpu_scratch_scp); } + +void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR, + HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK, + HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT, + up_force_intr); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index dfb426f2dc2c..7056c7342afc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -698,4 +698,7 @@ void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); /* set pci register reset disable */ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); +/* set uP Force Interrupt */ +void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr); + #endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index e0cf70120f1d..716674a9b729 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -2387,4 +2387,17 @@ #define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \ (0x00000300u + (scratch_scp) * 0x4) +/* register address for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR 0x00000404 +/* bitmask for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK 0x00000002 +/* inverted bitmask for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSKN 0xFFFFFFFD +/* lower bit position of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT 1 +/* width of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_WIDTH 1 +/* default value of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0 + #endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index c965e65d07db..7def1cb8ab9d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -49,6 +49,7 @@ #define FORCE_FLASHLESS 0 static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); + static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); @@ -69,10 +70,10 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) self->fw_ver_actual) == 0) { *fw_ops = &aq_fw_1x_ops; } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, - 
self->fw_ver_actual) == 0) { + self->fw_ver_actual) == 0) { *fw_ops = &aq_fw_2x_ops; } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, - self->fw_ver_actual) == 0) { + self->fw_ver_actual) == 0) { *fw_ops = &aq_fw_2x_ops; } else { aq_pr_err("Bad FW version detected: %x\n", @@ -260,7 +261,7 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) hw_atl_utils_mpi_set_state(self, MPI_DEINIT); AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) & - HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, + HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, 10, 1000U); } @@ -277,7 +278,7 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM) == 1U, - 1U, 10000U); + 1U, 10000U); if (err < 0) { bool is_locked; @@ -325,17 +326,31 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, err = -ETIME; goto err_exit; } + if (IS_CHIP_FEATURE(REVISION_B1)) { + u32 offset = 0; + + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x328, p[offset]); + aq_hw_write_reg(self, 0x32C, + (0x80000000 | (0xFFFF & (offset * 4)))); + hw_atl_mcp_up_force_intr_set(self, 1); + /* 1000 times by 10us = 10ms */ + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, + 0x32C) & 0xF0000000) != + 0x80000000, + 10, 1000); + } + } else { + u32 offset = 0; - aq_hw_write_reg(self, 0x00000208U, a); - - for (++cnt; --cnt;) { - u32 i = 0U; + aq_hw_write_reg(self, 0x208, a); - aq_hw_write_reg(self, 0x0000020CU, *(p++)); - aq_hw_write_reg(self, 0x00000200U, 0xC000U); + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x20C, p[offset]); + aq_hw_write_reg(self, 0x200, 0xC000); - for (i = 1024U; - (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, 0x200U) & + 0x100) == 0, 10, 1000); } } @@ -379,7 +394,7 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self, /* check 10 times by 1ms */ AQ_HW_WAIT_FOR(0U != (self->mbox_addr = - aq_hw_read_reg(self, 0x360U)), 1000U, 10U); + aq_hw_read_reg(self, 0x360U)), 1000U, 10U); return err; } @@ -399,7 +414,7 @@ struct aq_hw_atl_utils_fw_rpc_tid_s { #define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) -static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; @@ -411,7 +426,7 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, (u32 *)(void *)&self->rpc, (rpc_size + sizeof(u32) - - sizeof(u8)) / sizeof(u32)); + sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; @@ -423,8 +438,8 @@ err_exit: return err; } -static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, - struct hw_aq_atl_utils_fw_rpc **rpc) +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_atl_utils_fw_rpc **rpc) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; @@ -436,7 +451,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, self->rpc_tid = sw.tid; AQ_HW_WAIT_FOR(sw.tid == - (fw.val = + (fw.val = aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), fw.tid), 1000U, 100U); if (err < 0) @@ -459,7 +474,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, (u32 *)(void *) &self->rpc, (fw.len + sizeof(u32) - - sizeof(u8)) / + sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; @@ -489,16 +504,16 @@ err_exit: } int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox_header *pmbox) + struct 
hw_atl_utils_mbox_header *pmbox) { return hw_atl_utils_fw_downld_dwords(self, - self->mbox_addr, - (u32 *)(void *)pmbox, - sizeof(*pmbox) / sizeof(u32)); + self->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); } void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox *pmbox) + struct hw_atl_utils_mbox *pmbox) { int err = 0; @@ -538,7 +553,7 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, { int err = 0; u32 transaction_id = 0; - struct hw_aq_atl_utils_mbox_header mbox; + struct hw_atl_utils_mbox_header mbox; u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); if (state == MPI_RESET) { @@ -547,8 +562,8 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, transaction_id = mbox.transaction_id; AQ_HW_WAIT_FOR(transaction_id != - (hw_atl_utils_mpi_read_mbox(self, &mbox), - mbox.transaction_id), + (hw_atl_utils_mpi_read_mbox(self, &mbox), + mbox.transaction_id), 1000U, 100U); if (err < 0) goto err_exit; @@ -645,9 +660,9 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { /* chip revision */ - l = 0xE3000000U - | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) - | (0x00 << 16); + l = 0xE3000000U | + (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) | + (0x00 << 16); h = 0x8001300EU; mac[5] = (u8)(0xFFU & l); @@ -730,17 +745,9 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self) return 0; } -int hw_atl_utils_hw_set_power(struct aq_hw_s *self, - unsigned int power_state) -{ - hw_atl_utils_mpi_set_speed(self, 0); - hw_atl_utils_mpi_set_state(self, MPI_POWER); - return 0; -} - int hw_atl_utils_update_stats(struct aq_hw_s *self) { - struct hw_aq_atl_utils_mbox mbox; + struct hw_atl_utils_mbox mbox; hw_atl_utils_mpi_read_stats(self, &mbox); @@ -825,6 +832,81 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) return 0; } +static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac) +{ + struct hw_atl_utils_fw_rpc *prpc = NULL; + unsigned int rpc_size = 0U; + int err = 0; + + err = hw_atl_utils_fw_rpc_wait(self, &prpc); + if (err < 0) + goto err_exit; + + memset(prpc, 0, sizeof(*prpc)); + + if (wol_enabled) { + rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD; + prpc->msg_wol.priority = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR; + prpc->msg_wol.pattern_id = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN; + prpc->msg_wol.wol_packet_type = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT; + + ether_addr_copy((u8 *)&prpc->msg_wol.wol_pattern, mac); + } else { + rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL; + prpc->msg_wol.pattern_id = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN; + } + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + +err_exit: + return err; +} + +static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state, + u8 *mac) +{ + struct hw_atl_utils_fw_rpc *prpc = NULL; + unsigned int rpc_size = 0U; + int err = 0; + + if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { + err = aq_fw1x_set_wol(self, 1, mac); + + if (err < 0) + goto err_exit; + + rpc_size = sizeof(prpc->msg_id) + + sizeof(prpc->msg_enable_wakeup); + + err = hw_atl_utils_fw_rpc_wait(self, &prpc); + + if (err < 0) + goto err_exit; + + memset(prpc, 0, rpc_size); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP; + prpc->msg_enable_wakeup.pattern_mask = 0x00000002; + + err = hw_atl_utils_fw_rpc_call(self, 
rpc_size); + if (err < 0) + goto err_exit; + } + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_POWER); + +err_exit: + return err; +} + const struct aq_fw_ops aq_fw_1x_ops = { .init = hw_atl_utils_mpi_create, .deinit = hw_atl_fw1x_deinit, @@ -834,5 +916,8 @@ const struct aq_fw_ops aq_fw_1x_ops = { .set_state = hw_atl_utils_mpi_set_state, .update_link_status = hw_atl_utils_mpi_get_link_status, .update_stats = hw_atl_utils_update_stats, + .set_power = aq_fw1x_set_power, + .set_eee_rate = NULL, + .get_eee_rate = NULL, .set_flow_control = NULL, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index b875590efcbd..3613fca64b58 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -75,7 +75,7 @@ union __packed ip_addr { } v4; }; -struct __packed hw_aq_atl_utils_fw_rpc { +struct __packed hw_atl_utils_fw_rpc { u32 msg_id; union { @@ -101,8 +101,6 @@ struct __packed hw_aq_atl_utils_fw_rpc { struct { u32 priority; u32 wol_packet_type; - u16 friendly_name_len; - u16 friendly_name[65]; u32 pattern_id; u32 next_wol_pattern_offset; @@ -134,25 +132,112 @@ struct __packed hw_aq_atl_utils_fw_rpc { u32 pattern_offset; u32 pattern_size; } wol_bit_map_pattern; + + struct { + u8 mac_addr[ETH_ALEN]; + } wol_magic_packet_patter; } wol_pattern; } msg_wol; struct { - u32 is_wake_on_link_down; - u32 is_wake_on_link_up; - } msg_wolink; + union { + u32 pattern_mask; + + struct { + u32 reason_arp_v4_pkt : 1; + u32 reason_ipv4_ping_pkt : 1; + u32 reason_ipv6_ns_pkt : 1; + u32 reason_ipv6_ping_pkt : 1; + u32 reason_link_up : 1; + u32 reason_link_down : 1; + u32 reason_maximum : 1; + }; + }; + + union { + u32 offload_mask; + }; + } msg_enable_wakeup; + + struct { + u32 id; + } msg_del_id; }; }; -struct __packed hw_aq_atl_utils_mbox_header { +struct __packed hw_atl_utils_mbox_header { u32 version; u32 transaction_id; u32 error; }; -struct __packed hw_aq_atl_utils_mbox { - struct hw_aq_atl_utils_mbox_header header; +struct __packed hw_aq_info { + u8 reserved[6]; + u16 phy_fault_code; + u16 phy_temperature; + u8 cable_len; + u8 reserved1; + u32 cable_diag_data[4]; + u8 reserved2[32]; + u32 caps_lo; + u32 caps_hi; +}; + +struct __packed hw_atl_utils_mbox { + struct hw_atl_utils_mbox_header header; struct hw_atl_stats_s stats; + struct hw_aq_info info; +}; + +/* fw2x */ +typedef u32 fw_offset_t; + +struct __packed offload_ip_info { + u8 v4_local_addr_count; + u8 v4_addr_count; + u8 v6_local_addr_count; + u8 v6_addr_count; + fw_offset_t v4_addr; + fw_offset_t v4_prefix; + fw_offset_t v6_addr; + fw_offset_t v6_prefix; +}; + +struct __packed offload_port_info { + u16 udp_port_count; + u16 tcp_port_count; + fw_offset_t udp_port; + fw_offset_t tcp_port; +}; + +struct __packed offload_ka_info { + u16 v4_ka_count; + u16 v6_ka_count; + u32 retry_count; + u32 retry_interval; + fw_offset_t v4_ka; + fw_offset_t v6_ka; +}; + +struct __packed offload_rr_info { + u32 rr_count; + u32 rr_buf_len; + fw_offset_t rr_id_x; + fw_offset_t rr_buf; +}; + +struct __packed offload_info { + u32 version; + u32 len; + u8 mac_addr[ETH_ALEN]; + + u8 reserved[2]; + + struct offload_ip_info ips; + struct offload_port_info ports; + struct offload_ka_info kas; + struct offload_rr_info rrs; + u8 buf[0]; }; #define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U @@ -181,6 +266,21 @@ enum hal_atl_utils_fw_state_e { #define HAL_ATLANTIC_RATE_100M BIT(5) #define 
HAL_ATLANTIC_RATE_INVALID BIT(6) +#define HAL_ATLANTIC_UTILS_FW_MSG_PING 0x1U +#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 0x2U +#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 0x3U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U +#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U +#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 0x7U +#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 0x8U +#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 0x9U +#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 0xAU +#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 0xDU + enum hw_atl_fw2x_rate { FW2X_RATE_100M = 0x20, FW2X_RATE_1G = 0x100, @@ -286,10 +386,10 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self); void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox_header *pmbox); + struct hw_atl_utils_mbox_header *pmbox); void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox *pmbox); + struct hw_atl_utils_mbox *pmbox); void hw_atl_utils_mpi_set(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state, @@ -316,9 +416,17 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); int hw_atl_utils_update_stats(struct aq_hw_s *self); struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self); + int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt); +int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac); + +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size); + +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_atl_utils_fw_rpc **rpc); + extern const struct aq_fw_ops aq_fw_1x_ops; extern const struct aq_fw_ops aq_fw_2x_ops; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index e37943760a58..096ca5730887 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -16,11 +16,13 @@ #include "../aq_pci_func.h" #include "../aq_ring.h" #include "../aq_vec.h" +#include "../aq_nic.h" #include "hw_atl_utils.h" #include "hw_atl_llh.h" #define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364 #define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360 +#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334 #define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368 #define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C @@ -28,6 +30,42 @@ #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 #define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 +#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) +#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL) + +#define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY) +#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL) +#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP) +#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE) +#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE) +#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT) + +#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_2G5_MASK BIT(CAPS_HI_2P5GBASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE) + +#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8 +#define 
HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E + +struct __packed fw2x_msg_wol_pattern { + u8 mask[16]; + u32 crc; +}; + +struct __packed fw2x_msg_wol { + u32 msg_id; + u8 hw_addr[ETH_ALEN]; + u8 magic_packet_enabled; + u8 filter_count; + struct fw2x_msg_wol_pattern filter[HAL_ATLANTIC_WOL_FILTERS_COUNT]; + u8 link_up_enabled; + u8 link_down_enabled; + u16 reserved; + u32 link_up_timeout; + u32 link_down_timeout; +}; + static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed); static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); @@ -38,8 +76,12 @@ static int aq_fw2x_init(struct aq_hw_s *self) /* check 10 times by 1ms */ AQ_HW_WAIT_FOR(0U != (self->mbox_addr = - aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), 1000U, 10U); + AQ_HW_WAIT_FOR(0U != (self->rpc_addr = + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)), + 1000U, 100U); + return err; } @@ -78,6 +120,38 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) return rate; } +static u32 fw2x_to_eee_mask(u32 speed) +{ + u32 rate = 0; + + if (speed & HW_ATL_FW2X_CAP_EEE_10G_MASK) + rate |= AQ_NIC_RATE_EEE_10G; + if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK) + rate |= AQ_NIC_RATE_EEE_5G; + if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK) + rate |= AQ_NIC_RATE_EEE_2GS; + if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK) + rate |= AQ_NIC_RATE_EEE_1G; + + return rate; +} + +static u32 eee_mask_to_fw2x(u32 speed) +{ + u32 rate = 0; + + if (speed & AQ_NIC_RATE_EEE_10G) + rate |= HW_ATL_FW2X_CAP_EEE_10G_MASK; + if (speed & AQ_NIC_RATE_EEE_5G) + rate |= HW_ATL_FW2X_CAP_EEE_5G_MASK; + if (speed & AQ_NIC_RATE_EEE_2GS) + rate |= HW_ATL_FW2X_CAP_EEE_2G5_MASK; + if (speed & AQ_NIC_RATE_EEE_1G) + rate |= HW_ATL_FW2X_CAP_EEE_1G_MASK; + + return rate; +} + static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) { u32 val = link_speed_mask_2fw2x_ratemask(speed); @@ -100,14 +174,27 @@ static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state) *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE); } +static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts, + u32 eee_speeds) +{ + *mpi_opts &= ~(HW_ATL_FW2X_CAP_EEE_1G_MASK | + HW_ATL_FW2X_CAP_EEE_2G5_MASK | + HW_ATL_FW2X_CAP_EEE_5G_MASK | + HW_ATL_FW2X_CAP_EEE_10G_MASK); + + *mpi_opts |= eee_mask_to_fw2x(eee_speeds); +} + static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state) { u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; switch (state) { case MPI_INIT: mpi_state &= ~BIT(CAPS_HI_LINK_DROP); + aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds); aq_fw2x_set_mpi_flow_control(self, &mpi_state); break; case MPI_DEINIT: @@ -126,7 +213,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self) { u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR); u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | - FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G); + FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G); struct aq_hw_link_status_s *link_status = &self->aq_link_status; if (speed) { @@ -175,9 +262,7 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) get_random_bytes(&rnd, sizeof(unsigned int)); - l = 0xE3000000U - | (0xFFFFU & rnd) - | (0x00 << 16); + l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16); h = 0x8001300EU; mac[5] = (u8)(0xFFU & l); @@ -207,7 +292,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) /* Wait FW to report back */ 
AQ_HW_WAIT_FOR(orig_stats_val != (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & - BIT(CAPS_HI_STATISTICS)), + BIT(CAPS_HI_STATISTICS)), 1U, 10000U); if (err) return err; @@ -215,6 +300,135 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) return hw_atl_utils_update_stats(self); } +static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac) +{ + struct hw_atl_utils_fw_rpc *rpc = NULL; + struct offload_info *cfg = NULL; + unsigned int rpc_size = 0U; + u32 mpi_opts; + int err = 0; + + rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg); + + err = hw_atl_utils_fw_rpc_wait(self, &rpc); + if (err < 0) + goto err_exit; + + memset(rpc, 0, rpc_size); + cfg = (struct offload_info *)(&rpc->msg_id + 1); + + memcpy(cfg->mac_addr, mac, ETH_ALEN); + cfg->len = sizeof(*cfg); + + /* Clear bit 0x36C.23 and 0x36C.22 */ + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + mpi_opts &= ~HW_ATL_FW2X_CTRL_SLEEP_PROXY; + mpi_opts &= ~HW_ATL_FW2X_CTRL_LINK_DROP; + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + if (err < 0) + goto err_exit; + + /* Set bit 0x36C.23 */ + mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + HW_ATL_FW2X_CTRL_SLEEP_PROXY), 1U, 10000U); + +err_exit: + return err; +} + +static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac) +{ + struct hw_atl_utils_fw_rpc *rpc = NULL; + struct fw2x_msg_wol *msg = NULL; + u32 mpi_opts; + int err = 0; + + err = hw_atl_utils_fw_rpc_wait(self, &rpc); + if (err < 0) + goto err_exit; + + msg = (struct fw2x_msg_wol *)rpc; + + msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL; + msg->magic_packet_enabled = true; + memcpy(msg->hw_addr, mac, ETH_ALEN); + + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + mpi_opts &= ~(HW_ATL_FW2X_CTRL_SLEEP_PROXY | HW_ATL_FW2X_CTRL_WOL); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg)); + if (err < 0) + goto err_exit; + + /* Set bit 0x36C.24 */ + mpi_opts |= HW_ATL_FW2X_CTRL_WOL; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + HW_ATL_FW2X_CTRL_WOL), 1U, 10000U); + +err_exit: + return err; +} + +static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state, + u8 *mac) +{ + int err = 0; + + if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { + err = aq_fw2x_set_sleep_proxy(self, mac); + if (err < 0) + goto err_exit; + err = aq_fw2x_set_wol_params(self, mac); + } + +err_exit: + return err; +} + +static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed) +{ + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + aq_fw2x_upd_eee_rate_bits(self, &mpi_opts, speed); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + return 0; +} + +static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate, + u32 *supported_rates) +{ + u32 mpi_state; + u32 caps_hi; + int err = 0; + u32 addr = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, info) + + offsetof(struct hw_aq_info, caps_hi); + + err = hw_atl_utils_fw_downld_dwords(self, addr, &caps_hi, + sizeof(caps_hi) / sizeof(u32)); + + if (err) + return err; + + *supported_rates = fw2x_to_eee_mask(caps_hi); + + mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); + *rate = fw2x_to_eee_mask(mpi_state); + + return err; +} + 
static int aq_fw2x_renegotiate(struct aq_hw_s *self) { u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); @@ -247,5 +461,8 @@ const struct aq_fw_ops aq_fw_2x_ops = { .set_state = aq_fw2x_set_state, .update_link_status = aq_fw2x_update_link_status, .update_stats = aq_fw2x_update_stats, - .set_flow_control = aq_fw2x_set_flow_control, + .set_power = aq_fw2x_set_power, + .set_eee_rate = aq_fw2x_set_eee_rate, + .get_eee_rate = aq_fw2x_get_eee_rate, + .set_flow_control = aq_fw2x_set_flow_control, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index 94efc6477bdc..b48260114da3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -12,7 +12,7 @@ #define NIC_MAJOR_DRIVER_VERSION 2 #define NIC_MINOR_DRIVER_VERSION 0 -#define NIC_BUILD_DRIVER_VERSION 3 +#define NIC_BUILD_DRIVER_VERSION 4 #define NIC_REVISION_DRIVER_VERSION 0 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 6d3221134927..7968c644ad86 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1964,8 +1964,6 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) if (!alx_reset_mac(hw)) rc = PCI_ERS_RESULT_RECOVERED; out: - pci_cleanup_aer_uncorrect_error_status(pdev); - rtnl_unlock(); return rc; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index b81fbf119bce..63edc5706c09 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -63,7 +63,6 @@ #include <linux/jiffies.h> #include <linux/mii.h> #include <linux/module.h> -#include <linux/moduleparam.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/pci.h> @@ -3278,7 +3277,6 @@ static int atl1_set_link_ksettings(struct net_device *netdev, u16 phy_data; int ret_val = 0; u16 old_media_type = hw->media_type; - u32 advertising; if (netif_running(adapter->netdev)) { if (netif_msg_link(adapter)) @@ -3312,25 +3310,7 @@ static int atl1_set_link_ksettings(struct net_device *netdev, hw->media_type = MEDIA_TYPE_10M_HALF; } } - switch (hw->media_type) { - case MEDIA_TYPE_AUTO_SENSOR: - advertising = - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_TP; - break; - case MEDIA_TYPE_1000M_FULL: - advertising = - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_TP; - break; - default: - advertising = 0; - break; - } + if (atl1_phy_setup_autoneg_adv(hw)) { ret_val = -EINVAL; if (netif_msg_link(adapter)) diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index c8d1f8fa4713..6f56276015a4 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -935,18 +935,11 @@ static void nb8800_pause_adv(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; - u32 adv = 0; if (!phydev) return; - if (priv->pause_rx) - adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (priv->pause_tx) - adv ^= ADVERTISED_Asym_Pause; - - phydev->supported |= adv; - phydev->advertising |= adv; + phy_set_asym_pause(phydev, priv->pause_rx, priv->pause_tx); } static int nb8800_open(struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c 
b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 897302adc38e..6bae973d4dce 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -568,12 +568,13 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) /* * tx request callback */ -static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcm_enet_priv *priv; struct bcm_enet_desc *desc; u32 len_stat; - int ret; + netdev_tx_t ret; priv = netdev_priv(dev); @@ -890,19 +891,10 @@ static int bcm_enet_open(struct net_device *dev) } /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_MII); - phydev->advertising = phydev->supported; - - if (priv->pause_auto && priv->pause_rx && priv->pause_tx) - phydev->advertising |= SUPPORTED_Pause; - else - phydev->advertising &= ~SUPPORTED_Pause; + phy_support_sym_pause(phydev); + phy_set_max_speed(phydev, SPEED_100); + phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx, + priv->pause_auto); phy_attached_info(phydev); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c57238fce863..4122553e224b 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -126,8 +126,8 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, } /* Ethtool operations */ -static int bcm_sysport_set_rx_csum(struct net_device *dev, - netdev_features_t wanted) +static void bcm_sysport_set_rx_csum(struct net_device *dev, + netdev_features_t wanted) { struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; @@ -157,12 +157,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev, reg &= ~RXCHK_BRCM_TAG_EN; rxchk_writel(priv, reg, RXCHK_CONTROL); - - return 0; } -static int bcm_sysport_set_tx_csum(struct net_device *dev, - netdev_features_t wanted) +static void bcm_sysport_set_tx_csum(struct net_device *dev, + netdev_features_t wanted) { struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; @@ -177,23 +175,24 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev, else reg &= ~tdma_control_bit(priv, TSB_EN); tdma_writel(priv, reg, TDMA_CONTROL); - - return 0; } static int bcm_sysport_set_features(struct net_device *dev, netdev_features_t features) { - netdev_features_t changed = features ^ dev->features; - netdev_features_t wanted = dev->wanted_features; - int ret = 0; + struct bcm_sysport_priv *priv = netdev_priv(dev); + + /* Read CRC forward */ + if (!priv->is_lite) + priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + else + priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); - if (changed & NETIF_F_RXCSUM) - ret = bcm_sysport_set_rx_csum(dev, wanted); - if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) - ret = bcm_sysport_set_tx_csum(dev, wanted); + bcm_sysport_set_rx_csum(dev, features); + bcm_sysport_set_tx_csum(dev, features); - return ret; + return 0; } /* Hardware counters must be kept in sync because the order/offset @@ -285,6 +284,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), + 
STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb), + STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed), /* Per TX-queue statistics are dynamically appended */ }; @@ -1218,6 +1219,7 @@ static void bcm_sysport_poll_controller(struct net_device *dev) static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) { + struct bcm_sysport_priv *priv = netdev_priv(dev); struct sk_buff *nskb; struct bcm_tsb *tsb; u32 csum_info; @@ -1228,13 +1230,16 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, /* Re-allocate SKB if needed */ if (unlikely(skb_headroom(skb) < sizeof(*tsb))) { nskb = skb_realloc_headroom(skb, sizeof(*tsb)); - dev_kfree_skb(skb); if (!nskb) { + dev_kfree_skb_any(skb); + priv->mib.tx_realloc_tsb_failed++; dev->stats.tx_errors++; dev->stats.tx_dropped++; return NULL; } + dev_consume_skb_any(skb); skb = nskb; + priv->mib.tx_realloc_tsb++; } tsb = skb_push(skb, sizeof(*tsb)); @@ -1970,16 +1975,14 @@ static int bcm_sysport_open(struct net_device *dev) else gib_set_pad_extension(priv); + /* Apply features again in case we changed them while interface was + * down + */ + bcm_sysport_set_features(dev, dev->features); + /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); - /* Read CRC forward */ - if (!priv->is_lite) - priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); - else - priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & - GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); - phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); if (!phydev) { @@ -2508,9 +2511,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) dev->netdev_ops = &bcm_sysport_netdev_ops; netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64); - /* HW supported features, none enabled by default */ - dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; /* Request the WOL interrupt and advertise suspend if available */ priv->wol_irq_disabled = 1; @@ -2710,7 +2714,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) struct net_device *dev = dev_get_drvdata(d); struct bcm_sysport_priv *priv = netdev_priv(dev); unsigned int i; - u32 reg; int ret; if (!netif_running(dev)) @@ -2754,12 +2757,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) goto out_free_rx_ring; } - /* Enable rxhck */ - if (priv->rx_chk_en) { - reg = rxchk_readl(priv, RXCHK_CONTROL); - reg |= RXCHK_EN; - rxchk_writel(priv, reg, RXCHK_CONTROL); - } + /* Restore enabled features */ + bcm_sysport_set_features(dev, dev->features); rbuf_init(priv); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 046c6c1d97fd..a7a230884a87 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -607,6 +607,8 @@ struct bcm_sysport_mib { u32 alloc_rx_buff_failed; u32 rx_dma_failed; u32 tx_dma_failed; + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed; }; /* HW maintains a large list of counters */ diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 4c94d9218bba..cabc8e49ad24 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -616,7 +616,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) static const u16 ring_base[] = { 
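The bcmsysport changes above fold the per-feature toggles into one bcm_sysport_set_features() and replay it from open() and resume(). The sketch below is only a user-space model of that design choice: recompute all feature-dependent state from the mask so the same helper is safe to call at any time. The feature bits, struct fields and function names are invented for the example; the real driver additionally re-reads the CRC-forwarding state there.

/*
 * Rough model of "apply features idempotently from the mask".
 */
#include <stdbool.h>
#include <stdio.h>

#define F_RXCSUM (1u << 0)
#define F_TXCSUM (1u << 1)

struct model_dev {
	unsigned int features;	/* currently requested features */
	bool rx_csum_en;	/* models RXCHK_EN */
	bool tx_csum_en;	/* models TSB_EN */
};

/* Idempotent: safe from open(), resume() or a set_features callback. */
static int model_set_features(struct model_dev *dev, unsigned int features)
{
	dev->rx_csum_en = !!(features & F_RXCSUM);
	dev->tx_csum_en = !!(features & F_TXCSUM);
	dev->features = features;
	return 0;
}

static void model_open(struct model_dev *dev)
{
	/* Replay whatever was configured while the interface was down. */
	model_set_features(dev, dev->features);
}

int main(void)
{
	struct model_dev dev = { .features = F_RXCSUM | F_TXCSUM };

	model_open(&dev);
	printf("rx_csum=%d tx_csum=%d\n", dev.rx_csum_en, dev.tx_csum_en);
	return 0;
}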
BGMAC_DMA_BASE0, BGMAC_DMA_BASE1, BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, }; int size; /* ring size: different for Tx and Rx */ - int err; int i; BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); @@ -666,7 +665,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) if (!ring->cpu_base) { dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", ring->mmio_base); - err = -ENOMEM; goto err_dma_free; } diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 122fdb80a789..bbb247116045 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8793,13 +8793,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev) if (!(bp->flags & BNX2_FLAG_AER_ENABLED)) return result; - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - dev_err(&pdev->dev, - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", - err); /* non-fatal, continue */ - } - return result; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 0e508e5defce..142bc11b9fbb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -494,6 +494,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vf, int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto); +int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val); /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fcc2328bb0d9..95309b27c7d1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -3536,6 +3536,16 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) */ static void bnx2x_config_mf_bw(struct bnx2x *bp) { + /* Workaround for MFW bug. + * MFW is not supposed to generate BW attention in + * single function mode. 
+ */ + if (!IS_MF(bp)) { + DP(BNX2X_MSG_MCP, + "Ignoring MF BW config in single function mode\n"); + return; + } + if (bp->link_vars.link_up) { bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); bnx2x_link_sync_notify(bp); @@ -13105,6 +13115,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_set_vf_mac = bnx2x_set_vf_mac, .ndo_set_vf_vlan = bnx2x_set_vf_vlan, .ndo_get_vf_config = bnx2x_get_vf_config, + .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk, #endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, @@ -14369,14 +14380,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) rtnl_unlock(); - /* If AER, perform cleanup of the PCIe registers */ - if (bp->flags & AER_ENABLED) { - if (pci_cleanup_aer_uncorrect_error_status(pdev)) - BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n"); - else - DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n"); - } - return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 62da46537734..c835f6c7ecd0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -209,7 +209,10 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, */ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); - __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); + if (vf->spoofchk) + __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); + else + __clear_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); /* Setup-op rx parameters */ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { @@ -1269,6 +1272,8 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, bnx2x_vf(bp, i, state) = VF_FREE; mutex_init(&bnx2x_vf(bp, i, op_mutex)); bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; + /* enable spoofchk by default */ + bnx2x_vf(bp, i, spoofchk) = 1; } /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ @@ -2632,7 +2637,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, ivi->qos = 0; ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ ivi->min_tx_rate = 0; - ivi->spoofchk = 1; /*always enabled */ + ivi->spoofchk = vf->spoofchk ? 1 : 0; + ivi->linkstate = vf->link_cfg; if (vf->state == VF_ENABLED) { /* mac and vlan are in vlan_mac objects */ if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { @@ -2950,6 +2956,77 @@ out: return rc; } +int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_virtf *vf; + int i, rc = 0; + + vf = BP_VF(bp, idx); + if (!vf) + return -EINVAL; + + /* nothing to do */ + if (vf->spoofchk == val) + return 0; + + vf->spoofchk = val ? 1 : 0; + + DP(BNX2X_MSG_IOV, "%s spoofchk for VF %d\n", + val ? "enabling" : "disabling", idx); + + /* is vf initialized and queue set up? 
*/ + if (vf->state != VF_ENABLED || + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + return rc; + + /* User should be able to see error in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; + + /* send queue update ramrods to configure spoofchk */ + for_each_vfq(vf, i) { + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_queue_update_params *update_params; + + q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj); + + /* validate the Q is UP */ + if (bnx2x_get_q_logical_state(bp, q_params.q_obj) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + continue; + + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, + &update_params->update_flags); + if (val) { + __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, + &update_params->update_flags); + } else { + __clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, + &update_params->update_flags); + } + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to %s spoofchk on VF %d - vfq %d\n", + val ? "enable" : "disable", idx, i); + goto out; + } + } +out: + if (!rc) + DP(BNX2X_MSG_IOV, + "%s spoofchk for VF[%d]\n", val ? "Enabled" : "Disabled", + idx); + + return rc; +} + /* crc is the first field in the bulletin board. Compute the crc over the * entire bulletin board excluding the crc field itself. Use the length field * as the Bulletin Board was posted by a PF with possibly a different version diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index eb814c65152f..b6ebd92ec565 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -142,6 +142,8 @@ struct bnx2x_virtf { bool flr_clnup_stage; /* true during flr cleanup */ bool malicious; /* true if FW indicated so, until FLR */ + /* 1(true) if spoof check is enabled */ + u8 spoofchk; /* dma */ dma_addr_t fw_stat_map; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e2d92548226a..dd85d790f638 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -111,6 +111,7 @@ enum board_idx { BCM57452, BCM57454, BCM5745x_NPAR, + BCM57508, BCM58802, BCM58804, BCM58808, @@ -152,6 +153,7 @@ static const struct { [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, + [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, @@ -196,6 +198,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, + { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV @@ -241,15 +244,46 @@ static 
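The new bnx2x_set_vf_spoofchk() above is the driver side of the generic ndo_set_vf_spoofchk hook, which is what a command such as ip link set <pf> vf <n> spoofchk on|off ultimately reaches. The queue-update ramrod it sends uses a change-bit/value-bit pair: ANTI_SPOOF_CHNG marks the field as changed, ANTI_SPOOF carries the new value. The sketch below models only that convention, with placeholder bit names and values.

/*
 * Minimal model of the update_flags convention used by the spoofchk
 * ramrod: one bit says "this field changed", a second bit carries the
 * new value.  Bit positions are arbitrary stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define Q_UPDATE_ANTI_SPOOF_CHNG (1u << 0)
#define Q_UPDATE_ANTI_SPOOF      (1u << 1)

static unsigned int build_update_flags(bool val)
{
	unsigned int flags = Q_UPDATE_ANTI_SPOOF_CHNG;	/* always: field changed */

	if (val)
		flags |= Q_UPDATE_ANTI_SPOOF;		/* new value: enabled */
	return flags;
}

/* Models the consumer side: only apply the value if the change bit is set. */
static void apply_update(unsigned int *qstate, unsigned int flags)
{
	if (!(flags & Q_UPDATE_ANTI_SPOOF_CHNG))
		return;
	if (flags & Q_UPDATE_ANTI_SPOOF)
		*qstate |= Q_UPDATE_ANTI_SPOOF;
	else
		*qstate &= ~Q_UPDATE_ANTI_SPOOF;
}

int main(void)
{
	unsigned int qstate = 0;

	apply_update(&qstate, build_update_flags(true));
	printf("spoofchk on : qstate=0x%x\n", qstate);
	apply_update(&qstate, build_update_flags(false));
	printf("spoofchk off: qstate=0x%x\n", qstate);
	return 0;
}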
bool bnxt_vf_pciid(enum board_idx idx) #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) -#define BNXT_CP_DB_REARM(db, raw_cons) \ - writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db) - -#define BNXT_CP_DB(db, raw_cons) \ - writel(DB_CP_FLAGS | RING_CMP(raw_cons), db) - #define BNXT_CP_DB_IRQ_DIS(db) \ writel(DB_CP_IRQ_DIS_FLAGS, db) +#define BNXT_DB_CQ(db, idx) \ + writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell) + +#define BNXT_DB_NQ_P5(db, idx) \ + writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell) + +#define BNXT_DB_CQ_ARM(db, idx) \ + writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell) + +#define BNXT_DB_NQ_ARM_P5(db, idx) \ + writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell) + +static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + BNXT_DB_NQ_P5(db, idx); + else + BNXT_DB_CQ(db, idx); +} + +static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + BNXT_DB_NQ_ARM_P5(db, idx); + else + BNXT_DB_CQ_ARM(db, idx); +} + +static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx), + db->doorbell); + else + BNXT_DB_CQ(db, idx); +} + const u16 bnxt_lhint_arr[] = { TX_BD_FLAGS_LHINT_512_AND_SMALLER, TX_BD_FLAGS_LHINT_512_TO_1023, @@ -341,6 +375,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) struct tx_push_buffer *tx_push_buf = txr->tx_push; struct tx_push_bd *tx_push = &tx_push_buf->push_bd; struct tx_bd_ext *tx_push1 = &tx_push->txbd2; + void __iomem *db = txr->tx_db.doorbell; void *pdata = tx_push_buf->data; u64 *end; int j, push_len; @@ -398,12 +433,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) push_len = (length + sizeof(*tx_push) + 7) / 8; if (push_len > 16) { - __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); - __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1, + __iowrite64_copy(db, tx_push_buf, 16); + __iowrite32_copy(db + 4, tx_push_buf + 1, (push_len - 16) << 1); } else { - __iowrite64_copy(txr->tx_doorbell, tx_push_buf, - push_len); + __iowrite64_copy(db, tx_push_buf, push_len); } goto tx_done; @@ -505,7 +539,7 @@ normal_tx: txr->tx_prod = prod; if (!skb->xmit_more || netif_xmit_stopped(txq)) - bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod); + bnxt_db_write(bp, &txr->tx_db, prod); tx_done: @@ -513,7 +547,7 @@ tx_done: if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { if (skb->xmit_more && !tx_buf->is_push) - bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod); + bnxt_db_write(bp, &txr->tx_db, prod); netif_tx_stop_queue(txq); @@ -776,11 +810,11 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, return 0; } -static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, +static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons, u32 agg_bufs) { + struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt *bp = bnapi->bp; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u16 sw_prod = rxr->rx_sw_agg_prod; @@ -903,12 +937,13 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return skb; } -static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, +static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, + struct 
bnxt_cp_ring_info *cpr, struct sk_buff *skb, u16 cp_cons, u32 agg_bufs) { + struct bnxt_napi *bnapi = cpr->bnapi; struct pci_dev *pdev = bp->pdev; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u32 i; @@ -955,7 +990,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, * allocated already. */ rxr->rx_agg_prod = prod; - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i); return NULL; } @@ -1012,10 +1047,9 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, return skb; } -static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi, +static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u32 *raw_cons, void *cmp) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct rx_cmp *rxcmp = cmp; u32 tmp_raw_cons = *raw_cons; u8 cmp_type, agg_bufs = 0; @@ -1141,11 +1175,11 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, cons_rx_buf->data = NULL; } -static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, - u16 cp_cons, u32 agg_bufs) +static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons, + u32 agg_bufs) { if (agg_bufs) - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); } static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, @@ -1339,13 +1373,13 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) } static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, - struct bnxt_napi *bnapi, + struct bnxt_cp_ring_info *cpr, u32 *raw_cons, struct rx_tpa_end_cmp *tpa_end, struct rx_tpa_end_cmp_ext *tpa_end1, u8 *event) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u8 agg_id = TPA_END_AGG_ID(tpa_end); u8 *data_ptr, agg_bufs; @@ -1357,7 +1391,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, void *data; if (unlikely(bnapi->in_reset)) { - int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end); + int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); if (rc < 0) return ERR_PTR(-EBUSY); @@ -1383,7 +1417,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, cp_cons, agg_bufs); if (agg_bufs > MAX_SKB_FRAGS) netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", agg_bufs, (int)MAX_SKB_FRAGS); @@ -1393,7 +1427,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, cp_cons, agg_bufs); return NULL; } } else { @@ -1402,7 +1436,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, cp_cons, agg_bufs); return NULL; } @@ -1417,7 +1451,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!skb) { kfree(data); - bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, cp_cons, agg_bufs); return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1425,7 +1459,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if 
(agg_bufs) { - skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). */ return NULL; @@ -1479,10 +1513,10 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, * -ENOMEM - packet aborted due to out of memory * -EIO - packet aborted due to hw error indicated in BD */ -static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, - u8 *event) +static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u32 *raw_cons, u8 *event) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; struct net_device *dev = bp->dev; struct rx_cmp *rxcmp; @@ -1521,7 +1555,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, goto next_rx_no_prod_no_len; } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { - skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, + skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, (struct rx_tpa_end_cmp *)rxcmp, (struct rx_tpa_end_cmp_ext *)rxcmp1, event); @@ -1542,7 +1576,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, data = rx_buf->data; data_ptr = rx_buf->data_ptr; if (unlikely(cons != rxr->rx_next_cons)) { - int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); + int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); bnxt_sched_reset(bp, rxr); return rc1; @@ -1565,7 +1599,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) - bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); rc = -EIO; goto next_rx; @@ -1602,7 +1636,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs); if (!skb) { rc = -ENOMEM; goto next_rx; @@ -1664,10 +1698,10 @@ next_rx_no_prod_no_len: /* In netpoll mode, if we are using a combined completion ring, we need to * discard the rx packets and recycle the buffers. */ -static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi, +static int bnxt_force_rx_discard(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, u32 *raw_cons, u8 *event) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; u32 tmp_raw_cons = *raw_cons; struct rx_cmp_ext *rxcmp1; struct rx_cmp *rxcmp; @@ -1697,7 +1731,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi, tpa_end1->rx_tpa_end_cmp_errors_v2 |= cpu_to_le32(RX_TPA_END_CMP_ERRORS); } - return bnxt_rx_pkt(bp, bnapi, raw_cons, event); + return bnxt_rx_pkt(bp, cpr, raw_cons, event); } #define BNXT_GET_EVENT_PORT(data) \ @@ -1848,7 +1882,7 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance) } /* disable ring IRQ */ - BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell); + BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); /* Return here if interrupt is shared and is disabled. 
*/ if (unlikely(atomic_read(&bp->intr_sem) != 0)) @@ -1858,9 +1892,10 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance) return IRQ_HANDLED; } -static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + int budget) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_napi *bnapi = cpr->bnapi; u32 raw_cons = cpr->cp_raw_cons; u32 cons; int tx_pkts = 0; @@ -1868,6 +1903,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) u8 event = 0; struct tx_cmp *txcmp; + cpr->has_more_work = 0; while (1) { int rc; @@ -1881,19 +1917,22 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) * reading any further. */ dma_rmb(); + cpr->had_work_done = 1; if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { tx_pkts++; /* return full budget so NAPI will complete. */ if (unlikely(tx_pkts > bp->tx_wake_thresh)) { rx_pkts = budget; raw_cons = NEXT_RAW_CMP(raw_cons); + if (budget) + cpr->has_more_work = 1; break; } } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { if (likely(budget)) - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); + rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); else - rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, + rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, &event); if (likely(rc >= 0)) rx_pkts += rc; @@ -1916,39 +1955,60 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) } raw_cons = NEXT_RAW_CMP(raw_cons); - if (rx_pkts && rx_pkts == budget) + if (rx_pkts && rx_pkts == budget) { + cpr->has_more_work = 1; break; + } } if (event & BNXT_TX_EVENT) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; - void __iomem *db = txr->tx_doorbell; u16 prod = txr->tx_prod; /* Sync BD data before updating doorbell */ wmb(); - bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod); + bnxt_db_write_relaxed(bp, &txr->tx_db, prod); } cpr->cp_raw_cons = raw_cons; - /* ACK completion ring before freeing tx ring and producing new - * buffers in rx/agg rings to prevent overflowing the completion - * ring. - */ - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + bnapi->tx_pkts += tx_pkts; + bnapi->events |= event; + return rx_pkts; +} - if (tx_pkts) - bnapi->tx_int(bp, bnapi, tx_pkts); +static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) +{ + if (bnapi->tx_pkts) { + bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); + bnapi->tx_pkts = 0; + } - if (event & BNXT_RX_EVENT) { + if (bnapi->events & BNXT_RX_EVENT) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); - if (event & BNXT_AGG_EVENT) - bnxt_db_write(bp, rxr->rx_agg_doorbell, - DB_KEY_RX | rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + if (bnapi->events & BNXT_AGG_EVENT) + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); } + bnapi->events = 0; +} + +static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + int budget) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + int rx_pkts; + + rx_pkts = __bnxt_poll_work(bp, cpr, budget); + + /* ACK completion ring before freeing tx ring and producing new + * buffers in rx/agg rings to prevent overflowing the completion + * ring. 
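The poll rework above splits the old bnxt_poll_work() into a work phase (__bnxt_poll_work) that consumes up to the budget and merely records pending TX completions, RX events and has_more_work, and a done phase (__bnxt_poll_work_done) that flushes the doorbells once. The toy model below mirrors only that split; its structures and numbers are invented and it is not driver code.

/*
 * User-space sketch of the work/done split: process up to 'budget'
 * completions, remember what is still owed, then flush in one place.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_cpr {
	int pending;		/* completions still queued */
	bool has_more_work;
	int tx_pkts;		/* deferred TX completion count */
	bool rx_event;		/* deferred RX doorbell */
};

static int toy_poll_work(struct toy_cpr *cpr, int budget)
{
	int done = 0;

	cpr->has_more_work = false;
	while (cpr->pending && done < budget) {
		cpr->pending--;
		done++;
		cpr->tx_pkts++;
		cpr->rx_event = true;
	}
	if (cpr->pending)
		cpr->has_more_work = true;
	return done;
}

static void toy_poll_work_done(struct toy_cpr *cpr)
{
	if (cpr->tx_pkts) {
		printf("tx doorbell: %d packets completed\n", cpr->tx_pkts);
		cpr->tx_pkts = 0;
	}
	if (cpr->rx_event) {
		printf("rx doorbell rung\n");
		cpr->rx_event = false;
	}
}

int main(void)
{
	struct toy_cpr cpr = { .pending = 10 };

	while (1) {
		int done = toy_poll_work(&cpr, 4);

		printf("polled %d, more=%d\n", done, cpr.has_more_work);
		toy_poll_work_done(&cpr);
		if (!cpr.has_more_work)
			break;
	}
	return 0;
}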
+ */ + bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); + + __bnxt_poll_work_done(bp, bnapi); return rx_pkts; } @@ -1987,7 +2047,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) rxcmp1->rx_cmp_cfa_code_errors_v2 |= cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); + rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); if (likely(rc == -EIO) && budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ @@ -2006,16 +2066,15 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) } cpr->cp_raw_cons = raw_cons; - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); - bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); + BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); if (event & BNXT_AGG_EVENT) - bnxt_db_write(bp, rxr->rx_agg_doorbell, - DB_KEY_RX | rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { napi_complete_done(napi, rx_pkts); - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); } return rx_pkts; } @@ -2028,19 +2087,17 @@ static int bnxt_poll(struct napi_struct *napi, int budget) int work_done = 0; while (1) { - work_done += bnxt_poll_work(bp, bnapi, budget - work_done); + work_done += bnxt_poll_work(bp, cpr, budget - work_done); if (work_done >= budget) { if (!budget) - BNXT_CP_DB_REARM(cpr->cp_doorbell, - cpr->cp_raw_cons); + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); break; } if (!bnxt_has_work(bp, cpr)) { if (napi_complete_done(napi, work_done)) - BNXT_CP_DB_REARM(cpr->cp_doorbell, - cpr->cp_raw_cons); + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); break; } } @@ -2057,6 +2114,104 @@ static int bnxt_poll(struct napi_struct *napi, int budget) return work_done; } +static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int i, work_done = 0; + + for (i = 0; i < 2; i++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; + + if (cpr2) { + work_done += __bnxt_poll_work(bp, cpr2, + budget - work_done); + cpr->has_more_work |= cpr2->has_more_work; + } + } + return work_done; +} + +static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, + u64 dbr_type, bool all) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int i; + + for (i = 0; i < 2; i++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; + struct bnxt_db_info *db; + + if (cpr2 && (all || cpr2->had_work_done)) { + db = &cpr2->cp_db; + writeq(db->db_key64 | dbr_type | + RING_CMP(cpr2->cp_raw_cons), db->doorbell); + cpr2->had_work_done = 0; + } + } + __bnxt_poll_work_done(bp, bnapi); +} + +static int bnxt_poll_p5(struct napi_struct *napi, int budget) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 raw_cons = cpr->cp_raw_cons; + struct bnxt *bp = bnapi->bp; + struct nqe_cn *nqcmp; + int work_done = 0; + u32 cons; + + if (cpr->has_more_work) { + cpr->has_more_work = 0; + work_done = __bnxt_poll_cqs(bp, bnapi, budget); + if (cpr->has_more_work) { + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false); + return work_done; + } + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true); + if (napi_complete_done(napi, work_done)) + BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); + return work_done; + } + while (1) { + cons = RING_CMP(raw_cons); + nqcmp = 
&cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if (!NQ_CMP_VALID(nqcmp, raw_cons)) { + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, + false); + cpr->cp_raw_cons = raw_cons; + if (napi_complete_done(napi, work_done)) + BNXT_DB_NQ_ARM_P5(&cpr->cp_db, + cpr->cp_raw_cons); + return work_done; + } + + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); + + if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { + u32 idx = le32_to_cpu(nqcmp->cq_handle_low); + struct bnxt_cp_ring_info *cpr2; + + cpr2 = cpr->cp_ring_arr[idx]; + work_done += __bnxt_poll_work(bp, cpr2, + budget - work_done); + cpr->has_more_work = cpr2->has_more_work; + } else { + bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); + } + raw_cons = NEXT_RAW_CMP(raw_cons); + if (cpr->has_more_work) + break; + } + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true); + cpr->cp_raw_cons = raw_cons; + return work_done; +} + static void bnxt_free_tx_skbs(struct bnxt *bp) { int i, max_idx; @@ -2202,60 +2357,73 @@ static void bnxt_free_skbs(struct bnxt *bp) bnxt_free_rx_skbs(bp); } -static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) +static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) { struct pci_dev *pdev = bp->pdev; int i; - for (i = 0; i < ring->nr_pages; i++) { - if (!ring->pg_arr[i]) + for (i = 0; i < rmem->nr_pages; i++) { + if (!rmem->pg_arr[i]) continue; - dma_free_coherent(&pdev->dev, ring->page_size, - ring->pg_arr[i], ring->dma_arr[i]); + dma_free_coherent(&pdev->dev, rmem->page_size, + rmem->pg_arr[i], rmem->dma_arr[i]); - ring->pg_arr[i] = NULL; + rmem->pg_arr[i] = NULL; } - if (ring->pg_tbl) { - dma_free_coherent(&pdev->dev, ring->nr_pages * 8, - ring->pg_tbl, ring->pg_tbl_map); - ring->pg_tbl = NULL; + if (rmem->pg_tbl) { + dma_free_coherent(&pdev->dev, rmem->nr_pages * 8, + rmem->pg_tbl, rmem->pg_tbl_map); + rmem->pg_tbl = NULL; } - if (ring->vmem_size && *ring->vmem) { - vfree(*ring->vmem); - *ring->vmem = NULL; + if (rmem->vmem_size && *rmem->vmem) { + vfree(*rmem->vmem); + *rmem->vmem = NULL; } } -static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) +static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) { - int i; struct pci_dev *pdev = bp->pdev; + u64 valid_bit = 0; + int i; - if (ring->nr_pages > 1) { - ring->pg_tbl = dma_alloc_coherent(&pdev->dev, - ring->nr_pages * 8, - &ring->pg_tbl_map, + if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) + valid_bit = PTU_PTE_VALID; + if (rmem->nr_pages > 1) { + rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, + rmem->nr_pages * 8, + &rmem->pg_tbl_map, GFP_KERNEL); - if (!ring->pg_tbl) + if (!rmem->pg_tbl) return -ENOMEM; } - for (i = 0; i < ring->nr_pages; i++) { - ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev, - ring->page_size, - &ring->dma_arr[i], + for (i = 0; i < rmem->nr_pages; i++) { + u64 extra_bits = valid_bit; + + rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, + rmem->page_size, + &rmem->dma_arr[i], GFP_KERNEL); - if (!ring->pg_arr[i]) + if (!rmem->pg_arr[i]) return -ENOMEM; - if (ring->nr_pages > 1) - ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]); + if (rmem->nr_pages > 1) { + if (i == rmem->nr_pages - 2 && + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) + extra_bits |= PTU_PTE_NEXT_TO_LAST; + else if (i == rmem->nr_pages - 1 && + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) + extra_bits |= PTU_PTE_LAST; + rmem->pg_tbl[i] = + cpu_to_le64(rmem->dma_arr[i] | extra_bits); + } } - if (ring->vmem_size) { - 
*ring->vmem = vzalloc(ring->vmem_size); - if (!(*ring->vmem)) + if (rmem->vmem_size) { + *rmem->vmem = vzalloc(rmem->vmem_size); + if (!(*rmem->vmem)) return -ENOMEM; } return 0; @@ -2285,10 +2453,10 @@ static void bnxt_free_rx_rings(struct bnxt *bp) rxr->rx_agg_bmap = NULL; ring = &rxr->rx_ring_struct; - bnxt_free_ring(bp, ring); + bnxt_free_ring(bp, &ring->ring_mem); ring = &rxr->rx_agg_ring_struct; - bnxt_free_ring(bp, ring); + bnxt_free_ring(bp, &ring->ring_mem); } } @@ -2315,15 +2483,16 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) if (rc < 0) return rc; - rc = bnxt_alloc_ring(bp, ring); + rc = bnxt_alloc_ring(bp, &ring->ring_mem); if (rc) return rc; + ring->grp_idx = i; if (agg_rings) { u16 mem_size; ring = &rxr->rx_agg_ring_struct; - rc = bnxt_alloc_ring(bp, ring); + rc = bnxt_alloc_ring(bp, &ring->ring_mem); if (rc) return rc; @@ -2366,7 +2535,7 @@ static void bnxt_free_tx_rings(struct bnxt *bp) ring = &txr->tx_ring_struct; - bnxt_free_ring(bp, ring); + bnxt_free_ring(bp, &ring->ring_mem); } } @@ -2397,7 +2566,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) ring = &txr->tx_ring_struct; - rc = bnxt_alloc_ring(bp, ring); + rc = bnxt_alloc_ring(bp, &ring->ring_mem); if (rc) return rc; @@ -2443,6 +2612,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr; struct bnxt_ring_struct *ring; + int j; if (!bnapi) continue; @@ -2450,12 +2620,51 @@ static void bnxt_free_cp_rings(struct bnxt *bp) cpr = &bnapi->cp_ring; ring = &cpr->cp_ring_struct; - bnxt_free_ring(bp, ring); + bnxt_free_ring(bp, &ring->ring_mem); + + for (j = 0; j < 2; j++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; + + if (cpr2) { + ring = &cpr2->cp_ring_struct; + bnxt_free_ring(bp, &ring->ring_mem); + kfree(cpr2); + cpr->cp_ring_arr[j] = NULL; + } + } + } +} + +static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) +{ + struct bnxt_ring_mem_info *rmem; + struct bnxt_ring_struct *ring; + struct bnxt_cp_ring_info *cpr; + int rc; + + cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); + if (!cpr) + return NULL; + + ring = &cpr->cp_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->cp_nr_pages; + rmem->page_size = HW_CMPD_RING_SIZE; + rmem->pg_arr = (void **)cpr->cp_desc_ring; + rmem->dma_arr = cpr->cp_desc_mapping; + rmem->flags = BNXT_RMEM_RING_PTE_FLAG; + rc = bnxt_alloc_ring(bp, rmem); + if (rc) { + bnxt_free_ring(bp, rmem); + kfree(cpr); + cpr = NULL; } + return cpr; } static int bnxt_alloc_cp_rings(struct bnxt *bp) { + bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); int i, rc, ulp_base_vec, ulp_msix; ulp_msix = bnxt_get_ulp_msix_num(bp); @@ -2469,9 +2678,10 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) continue; cpr = &bnapi->cp_ring; + cpr->bnapi = bnapi; ring = &cpr->cp_ring_struct; - rc = bnxt_alloc_ring(bp, ring); + rc = bnxt_alloc_ring(bp, &ring->ring_mem); if (rc) return rc; @@ -2479,6 +2689,29 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) ring->map_idx = i + ulp_msix; else ring->map_idx = i; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + continue; + + if (i < bp->rx_nr_rings) { + struct bnxt_cp_ring_info *cpr2 = + bnxt_alloc_cp_sub_ring(bp); + + cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; + if (!cpr2) + return -ENOMEM; + cpr2->bnapi = bnapi; + } + if ((sh && i < bp->tx_nr_rings) || + (!sh && i >= bp->rx_nr_rings)) { + struct bnxt_cp_ring_info *cpr2 = + bnxt_alloc_cp_sub_ring(bp); + + cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; + if (!cpr2) + return -ENOMEM; + cpr2->bnapi = bnapi; + } } return 0; } @@ -2489,6 
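The bnxt_alloc_ring() hunk above starts tagging ring page-table entries: every entry gets a valid bit, and when the ring-PTE flag is set the second-to-last and last entries additionally get next-to-last/last markers. The snippet below is a simplified stand-alone model of that fill loop; the flag values are placeholders for PTU_PTE_VALID / PTU_PTE_NEXT_TO_LAST / PTU_PTE_LAST, and the DMA addresses are assumed page-aligned so their low bits are free for the markers.

/*
 * Model of the "which entry gets which marker" logic only.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PTE_VALID		0x1ull
#define FAKE_PTE_NEXT_TO_LAST	0x2ull
#define FAKE_PTE_LAST		0x4ull

static void fill_pg_tbl(uint64_t *tbl, const uint64_t *dma, int nr_pages,
			 int ring_pte)
{
	uint64_t valid = ring_pte ? FAKE_PTE_VALID : 0;

	for (int i = 0; i < nr_pages; i++) {
		uint64_t extra = valid;

		if (ring_pte && i == nr_pages - 2)
			extra |= FAKE_PTE_NEXT_TO_LAST;
		else if (ring_pte && i == nr_pages - 1)
			extra |= FAKE_PTE_LAST;
		tbl[i] = dma[i] | extra;	/* low bits free: page aligned */
	}
}

int main(void)
{
	uint64_t dma[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	uint64_t tbl[4];

	fill_pg_tbl(tbl, dma, 4, 1);
	for (int i = 0; i < 4; i++)
		printf("pte[%d] = 0x%llx\n", i, (unsigned long long)tbl[i]);
	return 0;
}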
+2722,7 @@ static void bnxt_init_ring_struct(struct bnxt *bp) for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_ring_mem_info *rmem; struct bnxt_cp_ring_info *cpr; struct bnxt_rx_ring_info *rxr; struct bnxt_tx_ring_info *txr; @@ -2499,31 +2733,34 @@ static void bnxt_init_ring_struct(struct bnxt *bp) cpr = &bnapi->cp_ring; ring = &cpr->cp_ring_struct; - ring->nr_pages = bp->cp_nr_pages; - ring->page_size = HW_CMPD_RING_SIZE; - ring->pg_arr = (void **)cpr->cp_desc_ring; - ring->dma_arr = cpr->cp_desc_mapping; - ring->vmem_size = 0; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->cp_nr_pages; + rmem->page_size = HW_CMPD_RING_SIZE; + rmem->pg_arr = (void **)cpr->cp_desc_ring; + rmem->dma_arr = cpr->cp_desc_mapping; + rmem->vmem_size = 0; rxr = bnapi->rx_ring; if (!rxr) goto skip_rx; ring = &rxr->rx_ring_struct; - ring->nr_pages = bp->rx_nr_pages; - ring->page_size = HW_RXBD_RING_SIZE; - ring->pg_arr = (void **)rxr->rx_desc_ring; - ring->dma_arr = rxr->rx_desc_mapping; - ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; - ring->vmem = (void **)&rxr->rx_buf_ring; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_desc_ring; + rmem->dma_arr = rxr->rx_desc_mapping; + rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; + rmem->vmem = (void **)&rxr->rx_buf_ring; ring = &rxr->rx_agg_ring_struct; - ring->nr_pages = bp->rx_agg_nr_pages; - ring->page_size = HW_RXBD_RING_SIZE; - ring->pg_arr = (void **)rxr->rx_agg_desc_ring; - ring->dma_arr = rxr->rx_agg_desc_mapping; - ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; - ring->vmem = (void **)&rxr->rx_agg_ring; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_agg_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; + rmem->dma_arr = rxr->rx_agg_desc_mapping; + rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; + rmem->vmem = (void **)&rxr->rx_agg_ring; skip_rx: txr = bnapi->tx_ring; @@ -2531,12 +2768,13 @@ skip_rx: continue; ring = &txr->tx_ring_struct; - ring->nr_pages = bp->tx_nr_pages; - ring->page_size = HW_RXBD_RING_SIZE; - ring->pg_arr = (void **)txr->tx_desc_ring; - ring->dma_arr = txr->tx_desc_mapping; - ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; - ring->vmem = (void **)&txr->tx_buf_ring; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->tx_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)txr->tx_desc_ring; + rmem->dma_arr = txr->tx_desc_mapping; + rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; + rmem->vmem = (void **)&txr->tx_buf_ring; } } @@ -2546,8 +2784,8 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) u32 prod; struct rx_bd **rx_buf_ring; - rx_buf_ring = (struct rx_bd **)ring->pg_arr; - for (i = 0, prod = 0; i < ring->nr_pages; i++) { + rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; + for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { int j; struct rx_bd *rxbd; @@ -2649,7 +2887,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) static void bnxt_init_cp_rings(struct bnxt *bp) { - int i; + int i, j; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; @@ -2658,6 +2896,17 @@ static void bnxt_init_cp_rings(struct bnxt *bp) ring->fw_ring_id = INVALID_HW_RING_ID; cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; + for (j = 0; j < 2; j++) { 
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; + + if (!cpr2) + continue; + + ring = &cpr2->cp_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; + cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; + } } } @@ -2761,10 +3010,12 @@ static void bnxt_init_vnics(struct bnxt *bp) for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + int j; vnic->fw_vnic_id = INVALID_HW_RING_ID; - vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; - vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; + for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) + vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; + vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; if (bp->vnic_info[i].rss_hash_key) { @@ -2978,6 +3229,9 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) } } + if (bp->flags & BNXT_FLAG_CHIP_P5) + goto vnic_skip_grps; + if (vnic->flags & BNXT_VNIC_RSS_FLAG) max_rings = bp->rx_nr_rings; else @@ -2988,7 +3242,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) rc = -ENOMEM; goto out; } - +vnic_skip_grps: if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && !(vnic->flags & BNXT_VNIC_RSS_FLAG)) continue; @@ -3042,7 +3296,7 @@ static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) if (bp->hwrm_short_cmd_req_addr) { struct pci_dev *pdev = bp->pdev; - dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, + dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, bp->hwrm_short_cmd_req_addr, bp->hwrm_short_cmd_req_dma_addr); bp->hwrm_short_cmd_req_addr = NULL; @@ -3054,7 +3308,7 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) struct pci_dev *pdev = bp->pdev; bp->hwrm_short_cmd_req_addr = - dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, + dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, &bp->hwrm_short_cmd_req_dma_addr, GFP_KERNEL); if (!bp->hwrm_short_cmd_req_addr) @@ -3078,6 +3332,13 @@ static void bnxt_free_stats(struct bnxt *bp) bp->hw_rx_port_stats = NULL; } + if (bp->hw_tx_port_stats_ext) { + dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), + bp->hw_tx_port_stats_ext, + bp->hw_tx_port_stats_ext_map); + bp->hw_tx_port_stats_ext = NULL; + } + if (bp->hw_rx_port_stats_ext) { dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), bp->hw_rx_port_stats_ext, @@ -3152,6 +3413,13 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (!bp->hw_rx_port_stats_ext) return 0; + if (bp->hwrm_spec_code >= 0x10902) { + bp->hw_tx_port_stats_ext = + dma_zalloc_coherent(&pdev->dev, + sizeof(struct tx_port_stats_ext), + &bp->hw_tx_port_stats_ext_map, + GFP_KERNEL); + } bp->flags |= BNXT_FLAG_PORT_STATS_EXT; } return 0; @@ -3290,6 +3558,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) bp->bnapi[i] = bnapi; bp->bnapi[i]->index = i; bp->bnapi[i]->bp = bp; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_cp_ring_info *cpr = + &bp->bnapi[i]->cp_ring; + + cpr->cp_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + } } bp->rx_ring = kcalloc(bp->rx_nr_rings, @@ -3299,7 +3574,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) return -ENOMEM; for (i = 0; i < bp->rx_nr_rings; i++) { - bp->rx_ring[i].bnapi = bp->bnapi[i]; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + rxr->rx_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + rxr->rx_agg_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + } + rxr->bnapi = bp->bnapi[i]; bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; } @@ -3321,12 +3604,16 @@ static int 
bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) j = bp->rx_nr_rings; for (i = 0; i < bp->tx_nr_rings; i++, j++) { - bp->tx_ring[i].bnapi = bp->bnapi[j]; - bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + txr->tx_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + txr->bnapi = bp->bnapi[j]; + bp->bnapi[j]->tx_ring = txr; bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; if (i >= bp->tx_nr_rings_xdp) { - bp->tx_ring[i].txq_index = i - - bp->tx_nr_rings_xdp; + txr->txq_index = i - bp->tx_nr_rings_xdp; bp->bnapi[j]->tx_int = bnxt_tx_int; } else { bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; @@ -3386,7 +3673,7 @@ static void bnxt_disable_int(struct bnxt *bp) struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; if (ring->fw_ring_id != INVALID_HW_RING_ID) - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); } } @@ -3422,7 +3709,7 @@ static void bnxt_enable_int(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); } } @@ -3455,12 +3742,27 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, cp_ring_id = le16_to_cpu(req->cmpl_ring); intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; - if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) { + if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { + if (msg_len > bp->hwrm_max_ext_req_len || + !bp->hwrm_short_cmd_req_addr) + return -EINVAL; + } + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + msg_len > BNXT_HWRM_MAX_REQ_LEN) { void *short_cmd_req = bp->hwrm_short_cmd_req_addr; + u16 max_msg_len; + + /* Set boundary for maximum extended request length for short + * cmd format. If passed up from device use the max supported + * internal req length. 
+ */ + max_msg_len = bp->hwrm_max_ext_req_len; memcpy(short_cmd_req, req, msg_len); - memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN - - msg_len); + if (msg_len < max_msg_len) + memset(short_cmd_req + msg_len, 0, + max_msg_len - msg_len); short_input.req_type = req->req_type; short_input.signature = @@ -3989,13 +4291,48 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) +{ + struct bnxt_ring_grp_info *grp_info; + + grp_info = &bp->grp_info[ring->grp_idx]; + return grp_info->cp_fw_ring_id; +} + +static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_napi *bnapi = rxr->bnapi; + struct bnxt_cp_ring_info *cpr; + + cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL]; + return cpr->cp_ring_struct.fw_ring_id; + } else { + return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); + } +} + +static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_napi *bnapi = txr->bnapi; + struct bnxt_cp_ring_info *cpr; + + cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL]; + return cpr->cp_ring_struct.fw_ring_id; + } else { + return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); + } +} + static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { u32 i, j, max_rings; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input req = {0}; - if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) + if ((bp->flags & BNXT_FLAG_CHIP_P5) || + vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); @@ -4026,6 +4363,51 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; + struct hwrm_vnic_rss_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + if (!set_rss) { + hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return 0; + } + req.hash_type = cpu_to_le32(bp->rss_hash_cfg); + req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); + req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); + nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); + for (i = 0, k = 0; i < nr_ctxs; i++) { + __le16 *ring_tbl = vnic->rss_table; + int rc; + + req.ring_table_pair_index = i; + req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); + for (j = 0; j < 64; j++) { + u16 ring_id; + + ring_id = rxr->rx_ring_struct.fw_ring_id; + *ring_tbl++ = cpu_to_le16(ring_id); + ring_id = bnxt_cp_ring_for_rx(bp, rxr); + *ring_tbl++ = cpu_to_le16(ring_id); + rxr++; + k++; + if (k == max_rings) { + k = 0; + rxr = &bp->rx_ring[0]; + } + } + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return -EIO; + } + return 0; +} + static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; @@ -4109,6 +4491,18 @@ int 
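bnxt_hwrm_vnic_set_rss_p5() above programs the RSS indirection table as pairs of (RX ring id, companion completion ring id), 64 pairs per context, wrapping back to RX ring 0 once every real ring has been used. The toy code below reproduces only that pairing and wrap-around walk; the IDs, table size and function names are invented.

/*
 * Toy RSS table fill: pairs of (rx ring, its cq), wrapping over the
 * available RX rings.
 */
#include <stdint.h>
#include <stdio.h>

#define SLOTS_PER_CTX 8		/* the hardware context holds 64; smaller here */

static void fill_rss_ctx(uint16_t *tbl, int nr_rx_rings)
{
	int k = 0;

	for (int j = 0; j < SLOTS_PER_CTX; j++) {
		uint16_t rx_id = 100 + k;	/* pretend fw RX ring id */
		uint16_t cq_id = 200 + k;	/* pretend companion cq id */

		*tbl++ = rx_id;
		*tbl++ = cq_id;
		if (++k == nr_rx_rings)
			k = 0;			/* wrap to ring 0 */
	}
}

int main(void)
{
	uint16_t tbl[2 * SLOTS_PER_CTX];

	fill_rss_ctx(tbl, 3);
	for (int j = 0; j < SLOTS_PER_CTX; j++)
		printf("slot %d: rx=%u cq=%u\n", j, tbl[2 * j], tbl[2 * j + 1]);
	return 0;
}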
bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; + + req.default_rx_ring_id = + cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); + req.default_cmpl_ring_id = + cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); + req.enables = + cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | + VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); + goto vnic_mru; + } req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); /* Only RSS support for now TBD: COS & LB */ if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { @@ -4141,13 +4535,13 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); - req.lb_rule = cpu_to_le16(0xffff); +vnic_mru: req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); #ifdef CONFIG_BNXT_SRIOV if (BNXT_VF(bp)) def_vlan = bp->vf.vlan; @@ -4195,6 +4589,10 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; struct hwrm_vnic_alloc_input req = {0}; struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + goto vnic_no_ring_grps; /* map ring groups to this vnic */ for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { @@ -4204,12 +4602,12 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, j, nr_rings); break; } - bp->vnic_info[vnic_id].fw_grp_ids[j] = - bp->grp_info[grp_idx].fw_grp_id; + vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; } - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; +vnic_no_ring_grps: + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) + vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; if (vnic_id == 0) req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); @@ -4218,7 +4616,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) - bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); + vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -4238,7 +4636,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (!rc) { u32 flags = le32_to_cpu(resp->flags); - if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP) + if (!(bp->flags & BNXT_FLAG_CHIP_P5) && + (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) bp->flags |= BNXT_FLAG_NEW_RSS_CAP; if (flags & VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) @@ -4253,6 +4652,9 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) u16 i; u32 rc = 0; + if (bp->flags & BNXT_FLAG_CHIP_P5) + return 0; + mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->rx_nr_rings; i++) { struct hwrm_ring_grp_alloc_input req = {0}; @@ -4285,7 +4687,7 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) u32 rc = 0; struct hwrm_ring_grp_free_input req = {0}; - if (!bp->grp_info) + if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); @@ -4314,45 +4716,90 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, int rc = 0, err = 0; struct 
hwrm_ring_alloc_input req = {0}; struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_ring_mem_info *rmem = &ring->ring_mem; struct bnxt_ring_grp_info *grp_info; u16 ring_id; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); req.enables = 0; - if (ring->nr_pages > 1) { - req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); + if (rmem->nr_pages > 1) { + req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); /* Page size is in log2 units */ req.page_size = BNXT_PAGE_SHIFT; req.page_tbl_depth = 1; } else { - req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); + req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); } req.fbo = 0; /* Association of ring index with doorbell index and MSIX number */ req.logical_id = cpu_to_le16(map_index); switch (ring_type) { - case HWRM_RING_ALLOC_TX: + case HWRM_RING_ALLOC_TX: { + struct bnxt_tx_ring_info *txr; + + txr = container_of(ring, struct bnxt_tx_ring_info, + tx_ring_struct); req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; /* Association of transmit ring with completion ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); req.length = cpu_to_le32(bp->tx_ring_mask + 1); req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); req.queue_id = cpu_to_le16(ring->queue_id); break; + } case HWRM_RING_ALLOC_RX: req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; req.length = cpu_to_le32(bp->rx_ring_mask + 1); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + u16 flags = 0; + + /* Association of rx ring with stats context */ + grp_info = &bp->grp_info[ring->grp_idx]; + req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); + req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req.enables |= cpu_to_le32( + RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + if (NET_IP_ALIGN == 2) + flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; + req.flags = cpu_to_le16(flags); + } break; case HWRM_RING_ALLOC_AGG: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + /* Association of agg ring with rx ring */ + grp_info = &bp->grp_info[ring->grp_idx]; + req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); + req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req.enables |= cpu_to_le32( + RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | + RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + } else { + req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + } req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); break; case HWRM_RING_ALLOC_CMPL: req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; req.length = cpu_to_le32(bp->cp_ring_mask + 1); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + /* Association of cp ring with nq */ + grp_info = &bp->grp_info[map_index]; + req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req.cq_handle = cpu_to_le64(ring->handle); + req.enables |= cpu_to_le32( + RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); + } else if (bp->flags & BNXT_FLAG_USING_MSIX) { + req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + } + break; + case HWRM_RING_ALLOC_NQ: + req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; + req.length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_USING_MSIX) req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; break; @@ -4401,22 +4848,67 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) return rc; } +static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, + u32 map_idx, u32 xid) +{ + if 
(bp->flags & BNXT_FLAG_CHIP_P5) { + if (BNXT_PF(bp)) + db->doorbell = bp->bar1 + 0x10000; + else + db->doorbell = bp->bar1 + 0x4000; + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; + break; + case HWRM_RING_ALLOC_RX: + case HWRM_RING_ALLOC_AGG: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; + break; + case HWRM_RING_ALLOC_CMPL: + db->db_key64 = DBR_PATH_L2; + break; + case HWRM_RING_ALLOC_NQ: + db->db_key64 = DBR_PATH_L2; + break; + } + db->db_key64 |= (u64)xid << DBR_XID_SFT; + } else { + db->doorbell = bp->bar1 + map_idx * 0x80; + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_key32 = DB_KEY_TX; + break; + case HWRM_RING_ALLOC_RX: + case HWRM_RING_ALLOC_AGG: + db->db_key32 = DB_KEY_RX; + break; + case HWRM_RING_ALLOC_CMPL: + db->db_key32 = DB_KEY_CP; + break; + } + } +} + static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { int i, rc = 0; + u32 type; + if (bp->flags & BNXT_FLAG_CHIP_P5) + type = HWRM_RING_ALLOC_NQ; + else + type = HWRM_RING_ALLOC_CMPL; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; u32 map_idx = ring->map_idx; - cpr->cp_doorbell = bp->bar1 + map_idx * 0x80; - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, - map_idx); + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); if (rc) goto err_out; - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); + bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; if (!i) { @@ -4426,33 +4918,69 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) } } + type = HWRM_RING_ALLOC_TX; for (i = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; - struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 map_idx = i; - - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, - map_idx); + struct bnxt_ring_struct *ring; + u32 map_idx; + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_napi *bnapi = txr->bnapi; + struct bnxt_cp_ring_info *cpr, *cpr2; + u32 type2 = HWRM_RING_ALLOC_CMPL; + + cpr = &bnapi->cp_ring; + cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL]; + ring = &cpr2->cp_ring_struct; + ring->handle = BNXT_TX_HDL; + map_idx = bnapi->index; + rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); + if (rc) + goto err_out; + bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, + ring->fw_ring_id); + bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); + } + ring = &txr->tx_ring_struct; + map_idx = i; + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); if (rc) goto err_out; - txr->tx_doorbell = bp->bar1 + map_idx * 0x80; + bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); } + type = HWRM_RING_ALLOC_RX; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; - u32 map_idx = rxr->bnapi->index; + struct bnxt_napi *bnapi = rxr->bnapi; + u32 map_idx = bnapi->index; - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, - map_idx); + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); if (rc) goto err_out; - rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; - writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; + if (bp->flags & 
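bnxt_set_db() above prepares a per-ring doorbell key once: on the new chips a 64-bit value made of path, type and the ring's xid, which every later write simply ORs with the current index. The sketch below models that composition only; the shift amount and constants are placeholders, not the real DBR_* definitions, and the write is represented by returning the value instead of a writeq().

/*
 * Placeholder model of a prepared 64-bit doorbell key.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_DBR_PATH_L2	(1ull << 60)
#define TOY_DBR_TYPE_SQ	(2ull << 56)
#define TOY_DBR_XID_SFT	32
#define TOY_IDX_MASK	0xffffffffull

struct toy_db {
	uint64_t db_key64;	/* prepared at ring-allocation time */
};

static void toy_set_db(struct toy_db *db, uint32_t xid)
{
	db->db_key64 = TOY_DBR_PATH_L2 | TOY_DBR_TYPE_SQ;
	db->db_key64 |= (uint64_t)xid << TOY_DBR_XID_SFT;
}

static uint64_t toy_db_write(const struct toy_db *db, uint32_t idx)
{
	/* Value that a real driver would hand to writeq(). */
	return db->db_key64 | (idx & TOY_IDX_MASK);
}

int main(void)
{
	struct toy_db db;

	toy_set_db(&db, 0x123);
	printf("doorbell value for idx 7: 0x%llx\n",
	       (unsigned long long)toy_db_write(&db, 7));
	return 0;
}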
BNXT_FLAG_CHIP_P5) { + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 type2 = HWRM_RING_ALLOC_CMPL; + struct bnxt_cp_ring_info *cpr2; + + cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; + ring = &cpr2->cp_ring_struct; + ring->handle = BNXT_RX_HDL; + rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); + if (rc) + goto err_out; + bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, + ring->fw_ring_id); + bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); + } } if (bp->flags & BNXT_FLAG_AGG_RINGS) { + type = HWRM_RING_ALLOC_AGG; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = @@ -4460,15 +4988,13 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) u32 grp_idx = ring->grp_idx; u32 map_idx = grp_idx + bp->rx_nr_rings; - rc = hwrm_ring_alloc_send_msg(bp, ring, - HWRM_RING_ALLOC_AGG, - map_idx); + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); if (rc) goto err_out; - rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; - writel(DB_KEY_RX | rxr->rx_agg_prod, - rxr->rx_agg_doorbell); + bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, + ring->fw_ring_id); + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; } } @@ -4504,6 +5030,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) { + u32 type; int i; if (!bp->bnapi) @@ -4512,9 +5039,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) for (i = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 grp_idx = txr->bnapi->index; - u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; + u32 cmpl_ring_id; + cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); if (ring->fw_ring_id != INVALID_HW_RING_ID) { hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, @@ -4528,8 +5055,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; u32 grp_idx = rxr->bnapi->index; - u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; + u32 cmpl_ring_id; + cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); if (ring->fw_ring_id != INVALID_HW_RING_ID) { hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_RX, @@ -4541,15 +5069,19 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } + if (bp->flags & BNXT_FLAG_CHIP_P5) + type = RING_FREE_REQ_RING_TYPE_RX_AGG; + else + type = RING_FREE_REQ_RING_TYPE_RX; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; u32 grp_idx = rxr->bnapi->index; - u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; + u32 cmpl_ring_id; + cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_RX, + hwrm_ring_free_send_msg(bp, ring, type, close_path ? 
cmpl_ring_id : INVALID_HW_RING_ID); ring->fw_ring_id = INVALID_HW_RING_ID; @@ -4564,14 +5096,32 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) */ bnxt_disable_int_sync(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5) + type = RING_FREE_REQ_RING_TYPE_NQ; + else + type = RING_FREE_REQ_RING_TYPE_L2_CMPL; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + struct bnxt_ring_struct *ring; + int j; + + for (j = 0; j < 2; j++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; + if (cpr2) { + ring = &cpr2->cp_ring_struct; + if (ring->fw_ring_id == INVALID_HW_RING_ID) + continue; + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_L2_CMPL, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + } + } + ring = &cpr->cp_ring_struct; if (ring->fw_ring_id != INVALID_HW_RING_ID) { - hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_L2_CMPL, + hwrm_ring_free_send_msg(bp, ring, type, INVALID_HW_RING_ID); ring->fw_ring_id = INVALID_HW_RING_ID; bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; @@ -4579,6 +5129,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } +static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, + bool shared); + static int bnxt_hwrm_get_rings(struct bnxt *bp) { struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; @@ -4609,6 +5162,22 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) cp = le16_to_cpu(resp->alloc_cmpl_rings); stats = le16_to_cpu(resp->alloc_stat_ctx); cp = min_t(u16, cp, stats); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + int rx = hw_resc->resv_rx_rings; + int tx = hw_resc->resv_tx_rings; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx >>= 1; + if (cp < (rx + tx)) { + bnxt_trim_rings(bp, &rx, &tx, cp, false); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx <<= 1; + hw_resc->resv_rx_rings = rx; + hw_resc->resv_tx_rings = tx; + } + cp = le16_to_cpu(resp->alloc_msix); + hw_resc->resv_hw_ring_grps = rx; + } hw_resc->resv_cp_rings = cp; } mutex_unlock(&bp->hwrm_cmd_lock); @@ -4634,6 +5203,8 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) return rc; } +static bool bnxt_rfs_supported(struct bnxt *bp); + static void __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, int tx_rings, int rx_rings, int ring_grps, @@ -4647,15 +5218,38 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->num_tx_rings = cpu_to_le16(tx_rings); if (BNXT_NEW_RM(bp)) { enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; - enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; - enables |= ring_grps ? - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; + enables |= tx_rings + ring_grps ? + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= rx_rings ? + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + } else { + enables |= cp_rings ? + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= ring_grps ? + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + } enables |= vnics ? 
FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; req->num_rx_rings = cpu_to_le16(rx_rings); - req->num_hw_ring_grps = cpu_to_le16(ring_grps); - req->num_cmpl_rings = cpu_to_le16(cp_rings); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); + req->num_msix = cpu_to_le16(cp_rings); + req->num_rsscos_ctxs = + cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); + } else { + req->num_cmpl_rings = cpu_to_le16(cp_rings); + req->num_hw_ring_grps = cpu_to_le16(ring_grps); + req->num_rsscos_ctxs = cpu_to_le16(1); + if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && + bnxt_rfs_supported(bp)) + req->num_rsscos_ctxs = + cpu_to_le16(ring_grps + 1); + } req->num_stat_ctxs = req->num_cmpl_rings; req->num_vnics = cpu_to_le16(vnics); } @@ -4672,16 +5266,33 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; - enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; - enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + enables |= tx_rings + ring_grps ? + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + } else { + enables |= cp_rings ? + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= ring_grps ? + FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + } enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; + req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); req->num_tx_rings = cpu_to_le16(tx_rings); req->num_rx_rings = cpu_to_le16(rx_rings); - req->num_hw_ring_grps = cpu_to_le16(ring_grps); - req->num_cmpl_rings = cpu_to_le16(cp_rings); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); + req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); + } else { + req->num_cmpl_rings = cpu_to_le16(cp_rings); + req->num_hw_ring_grps = cpu_to_le16(ring_grps); + req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); + } req->num_stat_ctxs = req->num_cmpl_rings; req->num_vnics = cpu_to_le16(vnics); @@ -4725,10 +5336,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, cp_rings, vnics); - req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | - FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS); - req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); - req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -4774,20 +5381,19 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) if (hw_resc->resv_tx_rings != bp->tx_nr_rings) return true; - if (bp->flags & BNXT_FLAG_RFS) + if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; if (BNXT_NEW_RM(bp) && (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || - hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic)) + hw_resc->resv_vnics != vnic || + (hw_resc->resv_hw_ring_grps != grp && + !(bp->flags & BNXT_FLAG_CHIP_P5)))) return true; return false; } -static int bnxt_trim_rings(struct bnxt 
*bp, int *rx, int *tx, int max, - bool shared); - static int __bnxt_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; @@ -4803,7 +5409,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) if (bp->flags & BNXT_FLAG_SHARED_RINGS) sh = true; - if (bp->flags & BNXT_FLAG_RFS) + if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; @@ -4866,9 +5472,11 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | - FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | - FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; + FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; req.flags = cpu_to_le32(flags); rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -4887,12 +5495,16 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, cp_rings, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; - if (BNXT_NEW_RM(bp)) + if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | - FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; + if (bp->flags & BNXT_FLAG_CHIP_P5) + flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; + else + flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; + } req.flags = cpu_to_le32(flags); rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -4915,46 +5527,140 @@ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, cp_rings, vnics); } -static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, +static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) +{ + struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + struct hwrm_ring_aggint_qcaps_input req = {0}; + int rc; + + coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; + coal_cap->num_cmpl_dma_aggr_max = 63; + coal_cap->num_cmpl_dma_aggr_during_int_max = 63; + coal_cap->cmpl_aggr_dma_tmr_max = 65535; + coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; + coal_cap->int_lat_tmr_min_max = 65535; + coal_cap->int_lat_tmr_max_max = 65535; + coal_cap->num_cmpl_aggr_int_max = 65535; + coal_cap->timer_units = 80; + + if (bp->hwrm_spec_code < 0x10902) + return; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); + coal_cap->nq_params = le32_to_cpu(resp->nq_params); + coal_cap->num_cmpl_dma_aggr_max = + le16_to_cpu(resp->num_cmpl_dma_aggr_max); + coal_cap->num_cmpl_dma_aggr_during_int_max = + le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); + coal_cap->cmpl_aggr_dma_tmr_max = + le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); + coal_cap->cmpl_aggr_dma_tmr_during_int_max = + le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); + coal_cap->int_lat_tmr_min_max = + le16_to_cpu(resp->int_lat_tmr_min_max); + coal_cap->int_lat_tmr_max_max = + le16_to_cpu(resp->int_lat_tmr_max_max); + 
coal_cap->num_cmpl_aggr_int_max = + le16_to_cpu(resp->num_cmpl_aggr_int_max); + coal_cap->timer_units = le16_to_cpu(resp->timer_units); + } + mutex_unlock(&bp->hwrm_cmd_lock); +} + +static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) +{ + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + + return usec * 1000 / coal_cap->timer_units; +} + +static void bnxt_hwrm_set_coal_params(struct bnxt *bp, + struct bnxt_coal *hw_coal, struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) { - u16 val, tmr, max, flags; + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + u32 cmpl_params = coal_cap->cmpl_params; + u16 val, tmr, max, flags = 0; max = hw_coal->bufs_per_record * 128; if (hw_coal->budget) max = hw_coal->bufs_per_record * hw_coal->budget; + max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); val = clamp_t(u16, hw_coal->coal_bufs, 1, max); req->num_cmpl_aggr_int = cpu_to_le16(val); - /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ - val = min_t(u16, val, 63); + val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); req->num_cmpl_dma_aggr = cpu_to_le16(val); - /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ - val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63); + val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, + coal_cap->num_cmpl_dma_aggr_during_int_max); req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); - tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks); - tmr = max_t(u16, tmr, 1); + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); + tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); req->int_lat_tmr_max = cpu_to_le16(tmr); /* min timer set to 1/2 of interrupt timer */ - val = tmr / 2; - req->int_lat_tmr_min = cpu_to_le16(val); + if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { + val = tmr / 2; + val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); + req->int_lat_tmr_min = cpu_to_le16(val); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + } /* buf timer set to 1/4 of interrupt timer */ - val = max_t(u16, tmr / 4, 1); + val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); req->cmpl_aggr_dma_tmr = cpu_to_le16(val); - tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq); - tmr = max_t(u16, tmr, 1); - req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); + if (cmpl_params & + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); + val = clamp_t(u16, tmr, 1, + coal_cap->cmpl_aggr_dma_tmr_during_int_max); + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); + req->enables |= + cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); + } - flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; - if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) + if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) + flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && + hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; req->flags = cpu_to_le16(flags); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); +} + +/* Caller holds bp->hwrm_cmd_lock */ +static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, + struct bnxt_coal *hw_coal) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_coal_cap *coal_cap = 
&bp->coal_cap; + u32 nq_params = coal_cap->nq_params; + u16 tmr; + + if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, + -1, -1); + req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); + req.flags = + cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); + + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; + tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); + req.int_lat_tmr_min = cpu_to_le16(tmr); + req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) @@ -4962,7 +5668,6 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal coal; - unsigned int grp_idx; /* Tick values in micro seconds. * 1 coal_buf x bufs_per_record = 1 completion record. @@ -4978,10 +5683,9 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) bnxt_hwrm_cmd_hdr_init(bp, &req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - bnxt_hwrm_set_coal_params(&coal, &req_rx); + bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); - grp_idx = bnapi->index; - req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); return hwrm_send_message(bp, &req_rx, sizeof(req_rx), HWRM_CMD_TIMEOUT); @@ -4998,22 +5702,46 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) bnxt_hwrm_cmd_hdr_init(bp, &req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx); - bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx); + bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); + bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_coal *hw_coal; + u16 ring_id; req = &req_rx; - if (!bnapi->rx_ring) + if (!bnapi->rx_ring) { + ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); req = &req_tx; - req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); + } else { + ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); + } + req->ring_id = cpu_to_le16(ring_id); rc = _hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); if (rc) break; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + continue; + + if (bnapi->rx_ring && bnapi->tx_ring) { + req = &req_tx; + ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); + req->ring_id = cpu_to_le16(ring_id); + rc = _hwrm_send_message(bp, req, sizeof(*req), + HWRM_CMD_TIMEOUT); + if (rc) + break; + } + if (bnapi->rx_ring) + hw_coal = &bp->rx_coal; + else + hw_coal = &bp->tx_coal; + __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); } mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -5140,6 +5868,304 @@ func_qcfg_exit: return rc; } +static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) +{ + struct hwrm_func_backing_store_qcaps_input req = {0}; + struct hwrm_func_backing_store_qcaps_output *resp = + bp->hwrm_cmd_resp_addr; + int rc; + + if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + struct bnxt_ctx_pg_info *ctx_pg; + 
struct bnxt_ctx_mem_info *ctx; + int i; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + rc = -ENOMEM; + goto ctx_err; + } + ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL); + if (!ctx_pg) { + kfree(ctx); + rc = -ENOMEM; + goto ctx_err; + } + for (i = 0; i < bp->max_q + 1; i++, ctx_pg++) + ctx->tqm_mem[i] = ctx_pg; + + bp->ctx = ctx; + ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); + ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); + ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); + ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size); + ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); + ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries); + ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size); + ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); + ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries); + ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size); + ctx->vnic_max_vnic_entries = + le16_to_cpu(resp->vnic_max_vnic_entries); + ctx->vnic_max_ring_table_entries = + le16_to_cpu(resp->vnic_max_ring_table_entries); + ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size); + ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries); + ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size); + ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size); + ctx->tqm_min_entries_per_ring = + le32_to_cpu(resp->tqm_min_entries_per_ring); + ctx->tqm_max_entries_per_ring = + le32_to_cpu(resp->tqm_max_entries_per_ring); + ctx->tqm_entries_multiple = resp->tqm_entries_multiple; + if (!ctx->tqm_entries_multiple) + ctx->tqm_entries_multiple = 1; + ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); + ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); + ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); + ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); + } else { + rc = 0; + } +ctx_err: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, + __le64 *pg_dir) +{ + u8 pg_size = 0; + + if (BNXT_PAGE_SHIFT == 13) + pg_size = 1 << 4; + else if (BNXT_PAGE_SIZE == 16) + pg_size = 2 << 4; + + *pg_attr = pg_size; + if (rmem->nr_pages > 1) { + *pg_attr |= 1; + *pg_dir = cpu_to_le64(rmem->pg_tbl_map); + } else { + *pg_dir = cpu_to_le64(rmem->dma_arr[0]); + } +} + +#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ + (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) + +static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) +{ + struct hwrm_func_backing_store_cfg_input req = {0}; + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_ctx_pg_info *ctx_pg; + __le32 *num_entries; + __le64 *pg_dir; + u8 *pg_attr; + int i, rc; + u32 ena; + + if (!ctx) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); + req.enables = cpu_to_le32(enables); + + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { + ctx_pg = &ctx->qp_mem; + req.qp_num_entries = cpu_to_le32(ctx_pg->entries); + req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); + req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); + req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.qpc_pg_size_qpc_lvl, + &req.qpc_page_dir); + } + if (enables & 
FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { + ctx_pg = &ctx->srq_mem; + req.srq_num_entries = cpu_to_le32(ctx_pg->entries); + req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); + req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.srq_pg_size_srq_lvl, + &req.srq_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { + ctx_pg = &ctx->cq_mem; + req.cq_num_entries = cpu_to_le32(ctx_pg->entries); + req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); + req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, + &req.cq_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { + ctx_pg = &ctx->vnic_mem; + req.vnic_num_vnic_entries = + cpu_to_le16(ctx->vnic_max_vnic_entries); + req.vnic_num_ring_table_entries = + cpu_to_le16(ctx->vnic_max_ring_table_entries); + req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.vnic_pg_size_vnic_lvl, + &req.vnic_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { + ctx_pg = &ctx->stat_mem; + req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); + req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.stat_pg_size_stat_lvl, + &req.stat_page_dir); + } + for (i = 0, num_entries = &req.tqm_sp_num_entries, + pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, + pg_dir = &req.tqm_sp_page_dir, + ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; + i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { + if (!(enables & ena)) + continue; + + req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); + ctx_pg = ctx->tqm_mem[i]; + *num_entries = cpu_to_le32(ctx_pg->entries); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); + } + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + return rc; +} + +static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + + if (!mem_size) + return 0; + + rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (rmem->nr_pages > MAX_CTX_PAGES) { + rmem->nr_pages = 0; + return -EINVAL; + } + rmem->page_size = BNXT_PAGE_SIZE; + rmem->pg_arr = ctx_pg->ctx_pg_arr; + rmem->dma_arr = ctx_pg->ctx_dma_arr; + rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; + return bnxt_alloc_ring(bp, rmem); +} + +static void bnxt_free_ctx_mem(struct bnxt *bp) +{ + struct bnxt_ctx_mem_info *ctx = bp->ctx; + int i; + + if (!ctx) + return; + + if (ctx->tqm_mem[0]) { + for (i = 0; i < bp->max_q + 1; i++) + bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem); + kfree(ctx->tqm_mem[0]); + ctx->tqm_mem[0] = NULL; + } + + bnxt_free_ring(bp, &ctx->stat_mem.ring_mem); + bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem); + bnxt_free_ring(bp, &ctx->cq_mem.ring_mem); + bnxt_free_ring(bp, &ctx->srq_mem.ring_mem); + bnxt_free_ring(bp, &ctx->qp_mem.ring_mem); + ctx->flags &= ~BNXT_CTX_FLAG_INITED; +} + +static int bnxt_alloc_ctx_mem(struct bnxt *bp) +{ + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ctx_mem_info *ctx; + u32 mem_size, ena, entries; + int i, rc; + + rc = bnxt_hwrm_func_backing_store_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", + rc); + return rc; + } + ctx = bp->ctx; + if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) + return 0; + + ctx_pg = &ctx->qp_mem; + ctx_pg->entries = 
ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; + mem_size = ctx->qp_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + if (rc) + return rc; + + ctx_pg = &ctx->srq_mem; + ctx_pg->entries = ctx->srq_max_l2_entries; + mem_size = ctx->srq_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + if (rc) + return rc; + + ctx_pg = &ctx->cq_mem; + ctx_pg->entries = ctx->cq_max_l2_entries; + mem_size = ctx->cq_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + if (rc) + return rc; + + ctx_pg = &ctx->vnic_mem; + ctx_pg->entries = ctx->vnic_max_vnic_entries + + ctx->vnic_max_ring_table_entries; + mem_size = ctx->vnic_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + if (rc) + return rc; + + ctx_pg = &ctx->stat_mem; + ctx_pg->entries = ctx->stat_max_entries; + mem_size = ctx->stat_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + if (rc) + return rc; + + entries = ctx->qp_max_l2_entries; + entries = roundup(entries, ctx->tqm_entries_multiple); + entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, + ctx->tqm_max_entries_per_ring); + for (i = 0, ena = 0; i < bp->max_q + 1; i++) { + ctx_pg = ctx->tqm_mem[i]; + ctx_pg->entries = entries; + mem_size = ctx->tqm_entry_size * entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + if (rc) + return rc; + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; + } + ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; + rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); + if (rc) + netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", + rc); + else + ctx->flags |= BNXT_CTX_FLAG_INITED; + + return 0; +} + int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) { struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; @@ -5178,6 +6204,13 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + u16 max_msix = le16_to_cpu(resp->max_msix); + + hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix); + hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; + } + if (BNXT_PF(bp)) { struct bnxt_pf_info *pf = &bp->pf; @@ -5267,6 +6300,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) if (rc) return rc; if (bp->hwrm_spec_code >= 0x10803) { + rc = bnxt_alloc_ctx_mem(bp); + if (rc) + return rc; rc = bnxt_hwrm_func_resc_qcaps(bp, true); if (!rc) bp->fw_cap |= BNXT_FW_CAP_NEW_RM; @@ -5311,13 +6347,15 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); qptr = &resp->queue_id0; for (i = 0, j = 0; i < bp->max_tc; i++) { - bp->q_info[j].queue_id = *qptr++; + bp->q_info[j].queue_id = *qptr; + bp->q_ids[i] = *qptr++; bp->q_info[j].queue_profile = *qptr++; bp->tc_to_qidx[j] = j; if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || (no_rdma && BNXT_PF(bp))) j++; } + bp->max_q = bp->max_tc; bp->max_tc = max_t(u8, j, 1); if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) @@ -5367,8 +6405,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) if (!bp->hwrm_cmd_timeout) bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; - if (resp->hwrm_intf_maj_8b >= 1) + if (resp->hwrm_intf_maj_8b >= 1) { bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); + bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); + } + if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) + 
bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; bp->chip_num = le16_to_cpu(resp->chip_num); if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && @@ -5425,8 +6467,10 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp) static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) { + struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_port_qstats_ext_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + int rc; if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) return 0; @@ -5435,7 +6479,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) req.port_id = cpu_to_le16(pf->port_id); req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext)); + req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; + bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8; + } else { + bp->fw_rx_stats_ext_size = 0; + bp->fw_tx_stats_ext_size = 0; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; } static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) @@ -5540,7 +6596,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) return rc; } -static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) +static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; int rc; @@ -5596,6 +6652,53 @@ vnic_setup_err: return rc; } +static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) +{ + int rc, i, nr_ctxs; + + nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); + for (i = 0; i < nr_ctxs; i++) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", + vnic_id, i, rc); + break; + } + bp->rsscos_nr_ctxs++; + } + if (i < nr_ctxs) + return -ENOMEM; + + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic_id, rc); + return rc; + } + rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", + vnic_id, rc); + return rc; + } + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", + vnic_id, rc); + } + } + return rc; +} + +static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + return __bnxt_setup_vnic_p5(bp, vnic_id); + else + return __bnxt_setup_vnic(bp, vnic_id); +} + static int bnxt_alloc_rfs_vnics(struct bnxt *bp) { #ifdef CONFIG_RFS_ACCEL @@ -6214,12 +7317,15 @@ static void bnxt_init_napi(struct bnxt *bp) struct bnxt_napi *bnapi; if (bp->flags & BNXT_FLAG_USING_MSIX) { - if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + poll_fn = bnxt_poll_p5; + else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) cp_nr_rings--; for (i = 0; i < cp_nr_rings; i++) { bnapi = bp->bnapi[i]; - netif_napi_add(bp->dev, &bnapi->napi, - bnxt_poll, 64); + netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); } if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bnapi = bp->bnapi[cp_nr_rings]; @@ -6976,10 +8082,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool 
irq_re_init, bool link_re_init) netdev_err(bp->dev, "Failed to reserve default rings at open\n"); return rc; } - rc = bnxt_reserve_rings(bp); - if (rc) - return rc; } + rc = bnxt_reserve_rings(bp); + if (rc) + return rc; if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_USING_MSIX)) { /* disable RFS if falling back to INTA */ @@ -7451,6 +8557,8 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp) /* If the chip and firmware supports RFS */ static bool bnxt_rfs_supported(struct bnxt *bp) { + if (bp->flags & BNXT_FLAG_CHIP_P5) + return false; if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) return true; if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) @@ -7464,6 +8572,8 @@ static bool bnxt_rfs_capable(struct bnxt *bp) #ifdef CONFIG_RFS_ACCEL int vnics, max_vnics, max_rss_ctxs; + if (bp->flags & BNXT_FLAG_CHIP_P5) + return false; if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) return false; @@ -7984,6 +9094,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) INIT_WORK(&bp->sp_task, bnxt_sp_task); spin_lock_init(&bp->ntp_fltr_lock); +#if BITS_PER_LONG == 32 + spin_lock_init(&bp->db_lock); +#endif bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; @@ -8549,6 +9662,9 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_dcb_free(bp); kfree(bp->edev); bp->edev = NULL; + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; bnxt_cleanup_pci(bp); free_netdev(dev); } @@ -8854,6 +9970,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; bp = netdev_priv(dev); + bnxt_set_max_func_irqs(bp, max_irqs); if (bnxt_vf_pciid(ent->driver_data)) bp->flags |= BNXT_FLAG_VF; @@ -8880,12 +9997,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; - if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) { + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { rc = bnxt_alloc_hwrm_short_cmd_req(bp); if (rc) goto init_err_pci_clean; } + if (BNXT_CHIP_P5(bp)) + bp->flags |= BNXT_FLAG_CHIP_P5; + rc = bnxt_hwrm_func_reset(bp); if (rc) goto init_err_pci_clean; @@ -8900,7 +10021,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_GRO; - if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + if (BNXT_SUPPORTS_TPA(bp)) dev->hw_features |= NETIF_F_LRO; dev->hw_enc_features = @@ -8914,7 +10035,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; - if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + if (BNXT_SUPPORTS_TPA(bp)) dev->hw_features |= NETIF_F_GRO_HW; dev->features |= dev->hw_features | NETIF_F_HIGHDMA; if (dev->features & NETIF_F_GRO_HW) @@ -8925,10 +10046,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) init_waitqueue_head(&bp->sriov_cfg_wait); mutex_init(&bp->sriov_lock); #endif - bp->gro_func = bnxt_gro_func_5730x; - if (BNXT_CHIP_P4_PLUS(bp)) - bp->gro_func = bnxt_gro_func_5731x; - else + if (BNXT_SUPPORTS_TPA(bp)) { + bp->gro_func = bnxt_gro_func_5730x; + if (BNXT_CHIP_P4(bp)) + bp->gro_func = bnxt_gro_func_5731x; + } + if (!BNXT_CHIP_P4_PLUS(bp)) bp->flags |= BNXT_FLAG_DOUBLE_DB; rc = bnxt_hwrm_func_drv_rgtr(bp); @@ -8941,6 +10064,13 @@ static int bnxt_init_one(struct 
pci_dev *pdev, const struct pci_device_id *ent) bp->ulp_probe = bnxt_ulp_probe; + rc = bnxt_hwrm_queue_qportcfg(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", + rc); + rc = -1; + goto init_err_pci_clean; + } /* Get the MAX capabilities for this function */ rc = bnxt_hwrm_func_qcaps(bp); if (rc) { @@ -8955,13 +10085,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = -EADDRNOTAVAIL; goto init_err_pci_clean; } - rc = bnxt_hwrm_queue_qportcfg(bp); - if (rc) { - netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", - rc); - rc = -1; - goto init_err_pci_clean; - } bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_port_led_qcaps(bp); @@ -8979,7 +10102,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); - bnxt_set_max_func_irqs(bp, max_irqs); rc = bnxt_set_dflt_rings(bp, true); if (rc) { netdev_err(bp->dev, "Not enough rings available.\n"); @@ -8992,7 +10114,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; - if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { + if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) { bp->flags |= BNXT_FLAG_UDP_RSS_CAP; bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; @@ -9027,6 +10149,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); + bnxt_hwrm_coal_params_qcaps(bp); + if (BNXT_PF(bp)) { if (!bnxt_pf_wq) { bnxt_pf_wq = @@ -9059,6 +10183,9 @@ init_err_cleanup_tc: init_err_pci_clean: bnxt_free_hwrm_resources(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; bnxt_cleanup_pci(bp); init_err_free: @@ -9227,13 +10354,6 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) rtnl_unlock(); - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - dev_err(&pdev->dev, - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", - err); /* non-fatal, continue */ - } - return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index bde384630a75..498b373c992d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,11 +12,11 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.9.2" +#define DRV_MODULE_VERSION "1.10.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 9 -#define DRV_VER_UPD 2 +#define DRV_VER_MIN 10 +#define DRV_VER_UPD 0 #include <linux/interrupt.h> #include <linux/rhashtable.h> @@ -403,6 +403,19 @@ struct rx_tpa_end_cmp_ext { ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ cpu_to_le32(RX_TPA_END_CMP_ERRORS)) +struct nqe_cn { + __le16 type; + #define NQ_CN_TYPE_MASK 0x3fUL + #define NQ_CN_TYPE_SFT 0 + #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL + #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION + __le16 reserved16; + __le32 cq_handle_low; + __le32 v; + #define NQ_CN_V 0x1UL + __le32 cq_handle_high; +}; + #define DB_IDX_MASK 0xffffff #define DB_IDX_VALID (0x1 << 26) #define DB_IRQ_DIS (0x1 << 27) @@ -416,6 +429,25 @@ struct rx_tpa_end_cmp_ext { #define BNXT_MIN_ROCE_CP_RINGS 2 #define BNXT_MIN_ROCE_STAT_CTXS 1 +/* 64-bit doorbell */ +#define DBR_INDEX_MASK 0x0000000000ffffffULL +#define DBR_XID_MASK 
0x000fffff00000000ULL +#define DBR_XID_SFT 32 +#define DBR_PATH_L2 (0x1ULL << 56) +#define DBR_TYPE_SQ (0x0ULL << 60) +#define DBR_TYPE_RQ (0x1ULL << 60) +#define DBR_TYPE_SRQ (0x2ULL << 60) +#define DBR_TYPE_SRQ_ARM (0x3ULL << 60) +#define DBR_TYPE_CQ (0x4ULL << 60) +#define DBR_TYPE_CQ_ARMSE (0x5ULL << 60) +#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60) +#define DBR_TYPE_CQ_ARMENA (0x7ULL << 60) +#define DBR_TYPE_SRQ_ARMENA (0x8ULL << 60) +#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60) +#define DBR_TYPE_NQ (0xaULL << 60) +#define DBR_TYPE_NQ_ARM (0xbULL << 60) +#define DBR_TYPE_NULL (0xfULL << 60) + #define INVALID_HW_RING_ID ((u16)-1) /* The hardware supports certain page sizes. Use the supported page sizes @@ -505,6 +537,9 @@ struct rx_tpa_end_cmp_ext { (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \ !((raw_cons) & bp->cp_bit)) +#define NQ_CMP_VALID(nqcmp, raw_cons) \ + (!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & bp->cp_bit)) + #define TX_CMP_TYPE(txcmp) \ (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE) @@ -577,9 +612,13 @@ struct bnxt_sw_rx_agg_bd { dma_addr_t mapping; }; -struct bnxt_ring_struct { +struct bnxt_ring_mem_info { int nr_pages; int page_size; + u32 flags; +#define BNXT_RMEM_VALID_PTE_FLAG 1 +#define BNXT_RMEM_RING_PTE_FLAG 2 + void **pg_arr; dma_addr_t *dma_arr; @@ -588,12 +627,17 @@ struct bnxt_ring_struct { int vmem_size; void **vmem; +}; + +struct bnxt_ring_struct { + struct bnxt_ring_mem_info ring_mem; u16 fw_ring_id; /* Ring id filled by Chimp FW */ union { u16 grp_idx; u16 map_idx; /* Used by cmpl rings */ }; + u32 handle; u8 queue_id; }; @@ -609,12 +653,20 @@ struct tx_push_buffer { u32 data[25]; }; +struct bnxt_db_info { + void __iomem *doorbell; + union { + u64 db_key64; + u32 db_key32; + }; +}; + struct bnxt_tx_ring_info { struct bnxt_napi *bnapi; u16 tx_prod; u16 tx_cons; u16 txq_index; - void __iomem *tx_doorbell; + struct bnxt_db_info tx_db; struct tx_bd *tx_desc_ring[MAX_TX_PAGES]; struct bnxt_sw_tx_bd *tx_buf_ring; @@ -631,6 +683,42 @@ struct bnxt_tx_ring_info { struct bnxt_ring_struct tx_ring_struct; }; +#define BNXT_LEGACY_COAL_CMPL_PARAMS \ + (RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT) + +#define BNXT_COAL_CMPL_ENABLES \ + (RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR | \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR | \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX | \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT) + +#define BNXT_COAL_CMPL_MIN_TMR_ENABLE \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN + +#define BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT + +struct bnxt_coal_cap { + u32 cmpl_params; + u32 nq_params; + u16 num_cmpl_dma_aggr_max; + u16 num_cmpl_dma_aggr_during_int_max; + u16 cmpl_aggr_dma_tmr_max; + u16 cmpl_aggr_dma_tmr_during_int_max; + u16 int_lat_tmr_min_max; + u16 int_lat_tmr_max_max; + u16 num_cmpl_aggr_int_max; + u16 timer_units; +}; + struct bnxt_coal { u16 
coal_ticks; u16 coal_ticks_irq; @@ -675,8 +763,8 @@ struct bnxt_rx_ring_info { u16 rx_agg_prod; u16 rx_sw_agg_prod; u16 rx_next_cons; - void __iomem *rx_doorbell; - void __iomem *rx_agg_doorbell; + struct bnxt_db_info rx_db; + struct bnxt_db_info rx_agg_db; struct bpf_prog *xdp_prog; @@ -703,8 +791,12 @@ struct bnxt_rx_ring_info { }; struct bnxt_cp_ring_info { + struct bnxt_napi *bnapi; u32 cp_raw_cons; - void __iomem *cp_doorbell; + struct bnxt_db_info cp_db; + + u8 had_work_done:1; + u8 has_more_work:1; struct bnxt_coal rx_ring_coal; u64 rx_packets; @@ -713,7 +805,10 @@ struct bnxt_cp_ring_info { struct net_dim dim; - struct tx_cmp *cp_desc_ring[MAX_CP_PAGES]; + union { + struct tx_cmp *cp_desc_ring[MAX_CP_PAGES]; + struct nqe_cn *nq_desc_ring[MAX_CP_PAGES]; + }; dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; @@ -723,6 +818,10 @@ struct bnxt_cp_ring_info { u64 rx_l4_csum_errors; struct bnxt_ring_struct cp_ring_struct; + + struct bnxt_cp_ring_info *cp_ring_arr[2]; +#define BNXT_RX_HDL 0 +#define BNXT_TX_HDL 1 }; struct bnxt_napi { @@ -736,6 +835,9 @@ struct bnxt_napi { void (*tx_int)(struct bnxt *, struct bnxt_napi *, int); + int tx_pkts; + u8 events; + u32 flags; #define BNXT_NAPI_FLAG_XDP 0x1 @@ -755,6 +857,7 @@ struct bnxt_irq { #define HWRM_RING_ALLOC_RX 0x2 #define HWRM_RING_ALLOC_AGG 0x4 #define HWRM_RING_ALLOC_CMPL 0x8 +#define HWRM_RING_ALLOC_NQ 0x10 #define INVALID_STATS_CTX_ID -1 @@ -768,7 +871,7 @@ struct bnxt_ring_grp_info { struct bnxt_vnic_info { u16 fw_vnic_id; /* returned by Chimp during alloc */ -#define BNXT_MAX_CTX_PER_VNIC 2 +#define BNXT_MAX_CTX_PER_VNIC 8 u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC]; u16 fw_l2_ctx_id; #define BNXT_MAX_UC_ADDRS 4 @@ -1069,6 +1172,55 @@ struct bnxt_vf_rep { struct bnxt_vf_rep_stats tx_stats; }; +#define PTU_PTE_VALID 0x1UL +#define PTU_PTE_LAST 0x2UL +#define PTU_PTE_NEXT_TO_LAST 0x4UL + +#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8) + +struct bnxt_ctx_pg_info { + u32 entries; + void *ctx_pg_arr[MAX_CTX_PAGES]; + dma_addr_t ctx_dma_arr[MAX_CTX_PAGES]; + struct bnxt_ring_mem_info ring_mem; +}; + +struct bnxt_ctx_mem_info { + u32 qp_max_entries; + u16 qp_min_qp1_entries; + u16 qp_max_l2_entries; + u16 qp_entry_size; + u16 srq_max_l2_entries; + u32 srq_max_entries; + u16 srq_entry_size; + u16 cq_max_l2_entries; + u32 cq_max_entries; + u16 cq_entry_size; + u16 vnic_max_vnic_entries; + u16 vnic_max_ring_table_entries; + u16 vnic_entry_size; + u32 stat_max_entries; + u16 stat_entry_size; + u16 tqm_entry_size; + u32 tqm_min_entries_per_ring; + u32 tqm_max_entries_per_ring; + u32 mrav_max_entries; + u16 mrav_entry_size; + u16 tim_entry_size; + u32 tim_max_entries; + u8 tqm_entries_multiple; + + u32 flags; + #define BNXT_CTX_FLAG_INITED 0x01 + + struct bnxt_ctx_pg_info qp_mem; + struct bnxt_ctx_pg_info srq_mem; + struct bnxt_ctx_pg_info cq_mem; + struct bnxt_ctx_pg_info vnic_mem; + struct bnxt_ctx_pg_info stat_mem; + struct bnxt_ctx_pg_info *tqm_mem[9]; +}; + struct bnxt { void __iomem *bar0; void __iomem *bar1; @@ -1098,6 +1250,8 @@ struct bnxt { #define CHIP_NUM_5745X 0xd730 +#define CHIP_NUM_57500 0x1750 + #define CHIP_NUM_58802 0xd802 #define CHIP_NUM_58804 0xd804 #define CHIP_NUM_58808 0xd808 @@ -1144,6 +1298,7 @@ struct bnxt { atomic_t intr_sem; u32 flags; + #define BNXT_FLAG_CHIP_P5 0x1 #define BNXT_FLAG_VF 0x2 #define BNXT_FLAG_LRO 0x4 #ifdef CONFIG_INET @@ -1190,15 +1345,24 @@ struct bnxt { #define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp)) #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) 
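
A minimal user-space sketch of how the DBR_* layout and bnxt_db_info added above fit together on the 57500 (P5) chips: bnxt_set_db() stores the constant part of the doorbell word (path, type, and the firmware ring id as the xid) in db_key64 once, and bnxt_db_write() ORs in the current producer index, which occupies the low 24 bits (DBR_INDEX_MASK). The ring id and producer index below are made-up values; this is illustrative only, not driver code.

	/* Illustrative only -- not part of the patch.  Composes a P5 TX
	 * doorbell word from the DBR_* constants added in bnxt.h.
	 */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DBR_INDEX_MASK	0x0000000000ffffffULL
	#define DBR_XID_SFT	32
	#define DBR_PATH_L2	(0x1ULL << 56)
	#define DBR_TYPE_SQ	(0x0ULL << 60)

	int main(void)
	{
		uint16_t fw_ring_id = 0x123;	/* hypothetical fw ring id */
		uint32_t tx_prod = 31;		/* hypothetical producer index */
		uint64_t key, db;

		/* constant part, set up once per ring (cf. bnxt_set_db()) */
		key = DBR_PATH_L2 | DBR_TYPE_SQ |
		      ((uint64_t)fw_ring_id << DBR_XID_SFT);

		/* per-write part: the producer index must fit in the low 24 bits */
		assert((tx_prod & ~DBR_INDEX_MASK) == 0);
		db = key | tx_prod;		/* cf. bnxt_db_write() */

		printf("db_key64 = %#llx, doorbell word = %#llx\n",
		       (unsigned long long)key, (unsigned long long)db);
		return 0;
	}
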
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) +#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \ + !(bp->flags & BNXT_FLAG_CHIP_P5)) -/* Chip class phase 4 and later */ -#define BNXT_CHIP_P4_PLUS(bp) \ +/* Chip class phase 5 */ +#define BNXT_CHIP_P5(bp) \ + ((bp)->chip_num == CHIP_NUM_57500) + +/* Chip class phase 4.x */ +#define BNXT_CHIP_P4(bp) \ (BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \ BNXT_CHIP_NUM_5745X((bp)->chip_num) || \ BNXT_CHIP_NUM_588XX((bp)->chip_num) || \ (BNXT_CHIP_NUM_58700((bp)->chip_num) && \ !BNXT_CHIP_TYPE_NITRO_A0(bp))) +#define BNXT_CHIP_P4_PLUS(bp) \ + (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp)) + struct bnxt_en_dev *edev; struct bnxt_en_dev * (*ulp_probe)(struct net_device *); @@ -1261,6 +1425,8 @@ struct bnxt { u8 max_lltc; /* lossless TCs */ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; u8 tc_to_qidx[BNXT_MAX_QUEUE]; + u8 q_ids[BNXT_MAX_QUEUE]; + u8 max_q; unsigned int current_interval; #define BNXT_TIMER_INTERVAL HZ @@ -1305,12 +1471,17 @@ struct bnxt { struct rx_port_stats *hw_rx_port_stats; struct tx_port_stats *hw_tx_port_stats; struct rx_port_stats_ext *hw_rx_port_stats_ext; + struct tx_port_stats_ext *hw_tx_port_stats_ext; dma_addr_t hw_rx_port_stats_map; dma_addr_t hw_tx_port_stats_map; dma_addr_t hw_rx_port_stats_ext_map; + dma_addr_t hw_tx_port_stats_ext_map; int hw_port_stats_size; + u16 fw_rx_stats_ext_size; + u16 fw_tx_stats_ext_size; u16 hwrm_max_req_len; + u16 hwrm_max_ext_req_len; int hwrm_cmd_timeout; struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ struct hwrm_ver_get_output ver_resp; @@ -1328,11 +1499,10 @@ struct bnxt { u8 port_count; u16 br_mode; + struct bnxt_coal_cap coal_cap; struct bnxt_coal rx_coal; struct bnxt_coal tx_coal; -#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) - u32 stats_coal_ticks; #define BNXT_DEF_STATS_COAL_TICKS 1000000 #define BNXT_MIN_STATS_COAL_TICKS 250000 @@ -1360,6 +1530,7 @@ struct bnxt { struct bnxt_hw_resc hw_resc; struct bnxt_pf_info pf; + struct bnxt_ctx_mem_info *ctx; #ifdef CONFIG_BNXT_SRIOV int nr_vfs; struct bnxt_vf_info vf; @@ -1374,6 +1545,11 @@ struct bnxt { struct mutex sriov_lock; #endif +#if BITS_PER_LONG == 32 + /* ensure atomic 64-bit doorbell writes on 32-bit systems. 
*/ + spinlock_t db_lock; +#endif + #define BNXT_NTP_FLTR_MAX_FLTR 4096 #define BNXT_NTP_FLTR_HASH_SIZE 512 #define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1) @@ -1425,6 +1601,9 @@ struct bnxt { #define BNXT_RX_STATS_EXT_OFFSET(counter) \ (offsetof(struct rx_port_stats_ext, counter) / 8) +#define BNXT_TX_STATS_EXT_OFFSET(counter) \ + (offsetof(struct tx_port_stats_ext, counter) / 8) + #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 #define SFF_DIAG_SUPPORT_OFFSET 0x5c @@ -1443,21 +1622,46 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); } +#if BITS_PER_LONG == 32 +#define writeq(val64, db) \ +do { \ + spin_lock(&bp->db_lock); \ + writel((val64) & 0xffffffff, db); \ + writel((val64) >> 32, (db) + 4); \ + spin_unlock(&bp->db_lock); \ +} while (0) + +#define writeq_relaxed writeq +#endif + /* For TX and RX ring doorbells with no ordering guarantee*/ -static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db, - u32 val) +static inline void bnxt_db_write_relaxed(struct bnxt *bp, + struct bnxt_db_info *db, u32 idx) { - writel_relaxed(val, db); - if (bp->flags & BNXT_FLAG_DOUBLE_DB) - writel_relaxed(val, db); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + writeq_relaxed(db->db_key64 | idx, db->doorbell); + } else { + u32 db_val = db->db_key32 | idx; + + writel_relaxed(db_val, db->doorbell); + if (bp->flags & BNXT_FLAG_DOUBLE_DB) + writel_relaxed(db_val, db->doorbell); + } } /* For TX and RX ring doorbells */ -static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val) +static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, + u32 idx) { - writel(val, db); - if (bp->flags & BNXT_FLAG_DOUBLE_DB) - writel(val, db); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + writeq(db->db_key64 | idx, db->doorbell); + } else { + u32 db_val = db->db_key32 | idx; + + writel(db_val, db->doorbell); + if (bp->flags & BNXT_FLAG_DOUBLE_DB) + writel(db_val, db->doorbell); + } } extern const u16 bnxt_lhint_arr[]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 790c684f08ab..140dbd62106d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -21,9 +21,22 @@ static const struct devlink_ops bnxt_dl_ops = { #endif /* CONFIG_BNXT_SRIOV */ }; +enum bnxt_dl_param_id { + BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, +}; + static const struct bnxt_dl_nvm_param nvm_params[] = { {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV, BNXT_NVM_SHARED_CFG, 1}, + {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI, + BNXT_NVM_SHARED_CFG, 1}, + {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10}, + {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7}, + {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK, + BNXT_NVM_SHARED_CFG, 1}, }; static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, @@ -55,8 +68,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE; - if (nvm_param.num_bits == 1) - buf = &val->vbool; + switch (bytesize) { + case 1: + if (nvm_param.num_bits == 1) + buf = &val->vbool; + else + buf = &val->vu8; + break; + case 2: + 
buf = &val->vu16; + break; + case 4: + buf = &val->vu32; + break; + default: + return -EFAULT; + } data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, &data_dma_addr, GFP_KERNEL); @@ -78,8 +105,12 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, memcpy(buf, data_addr, bytesize); dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); - if (rc) + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); + return -EACCES; + } else if (rc) { return -EIO; + } return 0; } @@ -88,9 +119,15 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, { struct hwrm_nvm_get_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + if (!rc) + if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; + + return rc; } static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, @@ -100,14 +137,55 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, struct bnxt *bp = bnxt_get_bp_from_dl(dl); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + + if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; + return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); } +static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + int max_val = -1; + + if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX) + max_val = BNXT_MSIX_VEC_MAX; + + if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN) + max_val = BNXT_MSIX_VEC_MIN_MAX; + + if (val.vu32 > max_val) { + NL_SET_ERR_MSG_MOD(extack, "MSIX value is exceeding the range"); + return -EINVAL; + } + + return 0; +} + static const struct devlink_param bnxt_dl_params[] = { DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT), bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, NULL), + DEVLINK_PARAM_GENERIC(IGNORE_ARI, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + bnxt_dl_msix_validate), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + bnxt_dl_msix_validate), + DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, + "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), }; int bnxt_dl_register(struct bnxt *bp) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 2f68dc048390..5b6b2c7d97cf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -33,8 +33,15 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) } } +#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 +#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114 +#define NVM_OFF_IGNORE_ARI 164 +#define NVM_OFF_DIS_GRE_VER_CHECK 171 #define NVM_OFF_ENABLE_SRIOV 401 +#define BNXT_MSIX_VEC_MAX 1280 +#define BNXT_MSIX_VEC_MIN_MAX 128 + enum bnxt_nvm_dir_type { BNXT_NVM_SHARED_CFG = 40, BNXT_NVM_PORT_CFG, diff --git 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index e52d7af3ab3e..48078564f025 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -148,6 +148,65 @@ reset_coalesce: #define BNXT_RX_STATS_EXT_ENTRY(counter) \ { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } +#define BNXT_TX_STATS_EXT_ENTRY(counter) \ + { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) } + +#define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \ + BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \ + BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions) + +#define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \ + BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \ + BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions) + +#define BNXT_RX_STATS_EXT_PFC_ENTRIES \ + BNXT_RX_STATS_EXT_PFC_ENTRY(0), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(1), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(2), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(3), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(4), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(5), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(6), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(7) + +#define BNXT_TX_STATS_EXT_PFC_ENTRIES \ + BNXT_TX_STATS_EXT_PFC_ENTRY(0), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(1), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(2), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(3), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(4), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(5), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(6), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(7) + +#define BNXT_RX_STATS_EXT_COS_ENTRY(n) \ + BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \ + BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n) + +#define BNXT_TX_STATS_EXT_COS_ENTRY(n) \ + BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \ + BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n) + +#define BNXT_RX_STATS_EXT_COS_ENTRIES \ + BNXT_RX_STATS_EXT_COS_ENTRY(0), \ + BNXT_RX_STATS_EXT_COS_ENTRY(1), \ + BNXT_RX_STATS_EXT_COS_ENTRY(2), \ + BNXT_RX_STATS_EXT_COS_ENTRY(3), \ + BNXT_RX_STATS_EXT_COS_ENTRY(4), \ + BNXT_RX_STATS_EXT_COS_ENTRY(5), \ + BNXT_RX_STATS_EXT_COS_ENTRY(6), \ + BNXT_RX_STATS_EXT_COS_ENTRY(7) \ + +#define BNXT_TX_STATS_EXT_COS_ENTRIES \ + BNXT_TX_STATS_EXT_COS_ENTRY(0), \ + BNXT_TX_STATS_EXT_COS_ENTRY(1), \ + BNXT_TX_STATS_EXT_COS_ENTRY(2), \ + BNXT_TX_STATS_EXT_COS_ENTRY(3), \ + BNXT_TX_STATS_EXT_COS_ENTRY(4), \ + BNXT_TX_STATS_EXT_COS_ENTRY(5), \ + BNXT_TX_STATS_EXT_COS_ENTRY(6), \ + BNXT_TX_STATS_EXT_COS_ENTRY(7) \ + enum { RX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS, @@ -256,11 +315,20 @@ static const struct { BNXT_RX_STATS_EXT_ENTRY(resume_pause_events), BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events), BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), + BNXT_RX_STATS_EXT_COS_ENTRIES, + BNXT_RX_STATS_EXT_PFC_ENTRIES, +}; + +static const struct { + long offset; + char string[ETH_GSTRING_LEN]; +} bnxt_tx_port_stats_ext_arr[] = { + BNXT_TX_STATS_EXT_COS_ENTRIES, + BNXT_TX_STATS_EXT_PFC_ENTRIES, }; #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) -#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr) static int bnxt_get_num_stats(struct bnxt *bp) { @@ -272,7 +340,8 @@ static int bnxt_get_num_stats(struct bnxt *bp) num_stats += BNXT_NUM_PORT_STATS; if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) - num_stats += BNXT_NUM_PORT_STATS_EXT; + num_stats += bp->fw_rx_stats_ext_size + + bp->fw_tx_stats_ext_size; return num_stats; } @@ -334,12 +403,17 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, } } if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - __le64 
*port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext; + __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext; + __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext; - for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) { - buf[j] = le64_to_cpu(*(port_stats_ext + + for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) { + buf[j] = le64_to_cpu(*(rx_port_stats_ext + bnxt_port_stats_ext_arr[i].offset)); } + for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) { + buf[j] = le64_to_cpu(*(tx_port_stats_ext + + bnxt_tx_port_stats_ext_arr[i].offset)); + } } } @@ -407,10 +481,15 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } } if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) { + for (i = 0; i < bp->fw_rx_stats_ext_size; i++) { strcpy(buf, bnxt_port_stats_ext_arr[i].string); buf += ETH_GSTRING_LEN; } + for (i = 0; i < bp->fw_tx_stats_ext_size; i++) { + strcpy(buf, + bnxt_tx_port_stats_ext_arr[i].string); + buf += ETH_GSTRING_LEN; + } } break; case ETH_SS_TEST: @@ -2419,11 +2498,11 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi, +static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u32 raw_cons, int pkt_size) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + struct bnxt_napi *bnapi = cpr->bnapi; + struct bnxt_rx_ring_info *rxr; struct bnxt_sw_rx_bd *rx_buf; struct rx_cmp *rxcmp; u16 cp_cons, cons; @@ -2431,6 +2510,7 @@ static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi, u32 len; int i; + rxr = bnapi->rx_ring; cp_cons = RING_CMP(raw_cons); rxcmp = (struct rx_cmp *) &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; @@ -2451,17 +2531,15 @@ static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi, return 0; } -static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size) +static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + int pkt_size) { - struct bnxt_napi *bnapi = bp->bnapi[0]; - struct bnxt_cp_ring_info *cpr; struct tx_cmp *txcmp; int rc = -EIO; u32 raw_cons; u32 cons; int i; - cpr = &bnapi->cp_ring; raw_cons = cpr->cp_raw_cons; for (i = 0; i < 200; i++) { cons = RING_CMP(raw_cons); @@ -2477,7 +2555,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size) */ dma_rmb(); if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) { - rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size); + rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size); raw_cons = NEXT_RAW_CMP(raw_cons); raw_cons = NEXT_RAW_CMP(raw_cons); break; @@ -2491,12 +2569,14 @@ static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size) static int bnxt_run_loopback(struct bnxt *bp) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[0]; + struct bnxt_cp_ring_info *cpr; int pkt_size, i = 0; struct sk_buff *skb; dma_addr_t map; u8 *data; int rc; + cpr = &txr->bnapi->cp_ring; pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh); skb = netdev_alloc_skb(bp->dev, pkt_size); if (!skb) @@ -2520,8 +2600,8 @@ static int bnxt_run_loopback(struct bnxt *bp) /* Sync BD data before updating doorbell */ wmb(); - bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | txr->tx_prod); - rc = bnxt_poll_loopback(bp, pkt_size); + bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); + rc = bnxt_poll_loopback(bp, cpr, pkt_size); dma_unmap_single(&bp->pdev->dev, map, pkt_size, 
PCI_DMA_TODEVICE); dev_kfree_skb(skb); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 971ace5d0d4a..5dd086059568 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -37,6 +37,8 @@ struct hwrm_resp_hdr { #define TLV_TYPE_HWRM_REQUEST 0x1UL #define TLV_TYPE_HWRM_RESPONSE 0x2UL #define TLV_TYPE_ROCE_SP_COMMAND 0x3UL +#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL +#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL #define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL #define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL #define TLV_TYPE_ENGINE_CKV_IV 0x8003UL @@ -186,6 +188,7 @@ struct cmd_nums { #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL + #define HWRM_STAT_CTX_ENG_QUERY 0xafUL #define HWRM_STAT_CTX_ALLOC 0xb0UL #define HWRM_STAT_CTX_FREE 0xb1UL #define HWRM_STAT_CTX_QUERY 0xb2UL @@ -235,6 +238,7 @@ struct cmd_nums { #define HWRM_CFA_PAIR_INFO 0x10fUL #define HWRM_FW_IPC_MSG 0x110UL #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL #define HWRM_ENGINE_CKV_HELLO 0x12dUL #define HWRM_ENGINE_CKV_STATUS 0x12eUL #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL @@ -295,6 +299,7 @@ struct cmd_nums { #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL #define HWRM_DBG_FW_CLI 0xff1aUL #define HWRM_DBG_I2C_CMD 0xff1bUL + #define HWRM_DBG_RING_INFO_GET 0xff1cUL #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL #define HWRM_NVM_VALIDATE_OPTION 0xffefUL #define HWRM_NVM_FLUSH 0xfff0UL @@ -320,20 +325,21 @@ struct cmd_nums { /* ret_codes (size:64b/8B) */ struct ret_codes { __le16 error_code; - #define HWRM_ERR_CODE_SUCCESS 0x0UL - #define HWRM_ERR_CODE_FAIL 0x1UL - #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL - #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL - #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL - #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL - #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL - #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL - #define HWRM_ERR_CODE_NO_BUFFER 0x8UL - #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL - #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL - #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL - #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL - #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED + #define HWRM_ERR_CODE_SUCCESS 0x0UL + #define HWRM_ERR_CODE_FAIL 0x1UL + #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL + #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL + #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL + #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL + #define HWRM_ERR_CODE_NO_BUFFER 0x8UL + #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL + #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL + #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL + #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL + #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED __le16 unused_0[3]; }; @@ -355,10 +361,10 @@ struct hwrm_err_output { #define HW_HASH_KEY_SIZE 40 #define HWRM_RESP_VALID_KEY 1 #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 9 -#define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_RSVD 25 -#define HWRM_VERSION_STR "1.9.2.25" +#define HWRM_VERSION_MINOR 10 +#define HWRM_VERSION_UPDATE 0 +#define HWRM_VERSION_RSVD 3 +#define HWRM_VERSION_STR "1.10.0.3" /* hwrm_ver_get_input (size:192b/24B) */ struct 
hwrm_ver_get_input { @@ -396,10 +402,15 @@ struct hwrm_ver_get_output { u8 netctrl_fw_bld_8b; u8 netctrl_fw_rsvd_8b; __le32 dev_caps_cfg; - #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL - #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL - #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL - #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL + #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL + #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL + #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL + #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL + #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL u8 roce_fw_maj_8b; u8 roce_fw_min_8b; u8 roce_fw_bld_8b; @@ -528,6 +539,7 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL @@ -539,6 +551,7 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR __le32 event_data2; @@ -652,10 +665,11 @@ struct hwrm_async_event_cmpl_vf_cfg_change { u8 timestamp_lo; __le16 timestamp_hi; __le32 event_data1; - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL }; /* hwrm_func_reset_input (size:192b/24B) */ @@ -852,6 +866,7 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -903,6 +918,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL + #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1014,6 +1030,7 @@ struct hwrm_func_cfg_input { #define 
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL + #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -1214,9 +1231,10 @@ struct hwrm_func_drv_rgtr_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL - #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL - #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -1416,7 +1434,9 @@ struct hwrm_func_resource_qcaps_output { __le16 min_hw_ring_grps; __le16 max_hw_ring_grps; __le16 max_tx_scheduler_inputs; - u8 unused_0[7]; + __le16 flags; + #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL + u8 unused_0[5]; u8 valid; }; @@ -1445,7 +1465,9 @@ struct hwrm_func_vf_resource_cfg_input { __le16 max_stat_ctx; __le16 min_hw_ring_grps; __le16 max_hw_ring_grps; - u8 unused_0[4]; + __le16 flags; + #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL + u8 unused_0[2]; }; /* hwrm_func_vf_resource_cfg_output (size:256b/32B) */ @@ -1503,7 +1525,8 @@ struct hwrm_func_backing_store_qcaps_output { __le16 mrav_entry_size; __le16 tim_entry_size; __le32 tim_max_entries; - u8 unused_0[3]; + u8 unused_0[2]; + u8 tqm_entries_multiple; u8 valid; }; @@ -1917,6 +1940,7 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB u8 auto_mode; @@ -1947,6 +1971,7 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB __le16 auto_link_speed_mask; @@ -1964,6 +1989,7 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL u8 wirespeed; #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL @@ -2048,6 +2074,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB u8 duplex_cfg; @@ -2072,6 +2099,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL #define 
PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL __le16 force_link_speed; #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL @@ -2083,6 +2111,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB u8 auto_mode; @@ -2107,6 +2136,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB __le16 auto_link_speed_mask; @@ -2124,6 +2154,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL u8 wirespeed; #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL @@ -2178,7 +2209,11 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL - #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 u8 media_type; #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL @@ -2644,7 +2679,8 @@ struct hwrm_port_qstats_ext_output { __le16 tx_stat_size; __le16 rx_stat_size; __le16 total_active_cos_queues; - u8 unused_0; + u8 flags; + #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL u8 valid; }; @@ -2685,7 +2721,9 @@ struct hwrm_port_clr_stats_input { __le16 target_id; __le64 resp_addr; __le16 port_id; - u8 unused_0[6]; + u8 flags; + #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL + u8 unused_0[5]; }; /* hwrm_port_clr_stats_output (size:128b/16B) */ @@ -4574,7 +4612,9 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ - u8 unused_0[3]; + u8 unused_0; + __le16 flags; + #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL __le64 page_tbl_addr; __le32 fbo; u8 page_size; @@ -4838,13 +4878,19 @@ struct hwrm_cfa_l2_filter_alloc_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST 
CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4 + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE __le32 enables; #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL @@ -4901,6 +4947,8 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 unused_4; @@ -4958,11 +5006,17 @@ struct hwrm_cfa_l2_filter_cfg_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX - #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE __le32 enables; #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL @@ -5064,6 +5118,8 @@ struct hwrm_cfa_tunnel_filter_alloc_input { #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 tunnel_flags; @@ -5140,7 +5196,7 @@ struct hwrm_vxlan_ipv6_hdr { __be32 dest_ip_addr[4]; }; -/* hwrm_cfa_encap_data_vxlan 
(size:576b/72B) */ +/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */ struct hwrm_cfa_encap_data_vxlan { u8 src_mac_addr[6]; __le16 unused_0; @@ -5159,6 +5215,10 @@ struct hwrm_cfa_encap_data_vxlan { __be16 src_port; __be16 dst_port; __be32 vni; + u8 hdr_rsvd0[3]; + u8 hdr_rsvd1; + u8 hdr_flags; + u8 unused[3]; }; /* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */ @@ -5171,15 +5231,18 @@ struct hwrm_cfa_encap_record_alloc_input { __le32 flags; #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL u8 encap_type; - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE u8 unused_0[3]; __le32 encap_data[20]; }; @@ -5273,6 +5336,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 pri_hint; @@ -5404,6 +5469,8 @@ struct hwrm_cfa_decap_filter_alloc_input { #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 unused_0; @@ -5476,19 +5543,22 @@ struct hwrm_cfa_flow_alloc_input { __le16 target_id; __le64 resp_addr; __le16 flags; - #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1 - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1) - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1) - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1) - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO - 
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3 - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3) - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3) - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3) - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 + #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1 + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3 + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 + #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL __le16 src_fid; __le32 tunnel_handle; __le16 action_flags; @@ -5502,6 +5572,7 @@ struct hwrm_cfa_flow_alloc_input { #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL __le16 dst_fid; __be16 l2_rewrite_vlan_tpid; __be16 l2_rewrite_vlan_tci; @@ -5525,21 +5596,38 @@ struct hwrm_cfa_flow_alloc_input { __be16 nat_port; __be16 l2_rewrite_smac[3]; u8 ip_proto; - u8 unused_0; -}; - -/* hwrm_cfa_flow_alloc_output (size:128b/16B) */ + u8 tunnel_type; + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL +}; + +/* hwrm_cfa_flow_alloc_output (size:256b/32B) */ struct hwrm_cfa_flow_alloc_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; __le16 flow_handle; - u8 unused_0[5]; + u8 unused_0[2]; + __le32 flow_id; + __le64 ext_flow_handle; + u8 unused_1[7]; u8 valid; }; -/* hwrm_cfa_flow_free_input (size:192b/24B) */ +/* hwrm_cfa_flow_free_input (size:256b/32B) */ struct hwrm_cfa_flow_free_input { __le16 req_type; __le16 cmpl_ring; @@ -5548,6 +5636,7 @@ struct hwrm_cfa_flow_free_input { __le64 resp_addr; __le16 flow_handle; u8 unused_0[6]; + __le64 ext_flow_handle; }; /* hwrm_cfa_flow_free_output (size:256b/32B) */ @@ -5562,7 +5651,7 @@ struct hwrm_cfa_flow_free_output { u8 valid; }; -/* hwrm_cfa_flow_stats_input (size:320b/40B) */ +/* 
hwrm_cfa_flow_stats_input (size:640b/80B) */ struct hwrm_cfa_flow_stats_input { __le16 req_type; __le16 cmpl_ring; @@ -5581,6 +5670,16 @@ struct hwrm_cfa_flow_stats_input { __le16 flow_handle_8; __le16 flow_handle_9; u8 unused_0[2]; + __le32 flow_id_0; + __le32 flow_id_1; + __le32 flow_id_2; + __le32 flow_id_3; + __le32 flow_id_4; + __le32 flow_id_5; + __le32 flow_id_6; + __le32 flow_id_7; + __le32 flow_id_8; + __le32 flow_id_9; }; /* hwrm_cfa_flow_stats_output (size:1408b/176B) */ @@ -5670,7 +5769,8 @@ struct hwrm_tunnel_dst_port_query_input { #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE u8 unused_0[7]; }; @@ -5698,7 +5798,8 @@ struct hwrm_tunnel_dst_port_alloc_input { #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE u8 unused_0; __be16 tunnel_dst_port_val; u8 unused_1[4]; @@ -5727,7 +5828,8 @@ struct hwrm_tunnel_dst_port_free_input { #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE u8 unused_0; __le16 tunnel_dst_port_id; u8 unused_1[4]; @@ -5932,10 +6034,11 @@ struct hwrm_fw_reset_input { #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT u8 selfrst_status; - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL - #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL + #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE u8 host_idx; u8 unused_0[5]; }; @@ -5947,10 +6050,11 @@ struct hwrm_fw_reset_output { __le16 seq_id; __le16 resp_len; u8 selfrst_status; - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL - #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define 
FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL + #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE u8 unused_0[6]; u8 valid; }; @@ -6498,6 +6602,34 @@ struct hwrm_dbg_coredump_retrieve_output { u8 valid; }; +/* hwrm_dbg_ring_info_get_input (size:192b/24B) */ +struct hwrm_dbg_ring_info_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX + u8 unused_0[3]; + __le32 fw_ring_id; +}; + +/* hwrm_dbg_ring_info_get_output (size:192b/24B) */ +struct hwrm_dbg_ring_info_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 producer_index; + __le32 consumer_index; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_nvm_read_input (size:320b/40B) */ struct hwrm_nvm_read_input { __le16 req_type; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index e1594c9df4c6..749f63beddd8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -189,7 +189,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, struct bnxt_tc_flow *flow) { struct flow_dissector *dissector = tc_flow_cmd->dissector; - u16 addr_type = 0; /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || @@ -199,13 +198,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, return -EOPNOTSUPP; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL); - - addr_type = key->addr_type; - } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_dissector_key_basic *key = GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); @@ -301,13 +293,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, flow->l4_mask.icmp.code = mask->code; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { - struct flow_dissector_key_control *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL); - - addr_type = key->addr_type; - } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { struct flow_dissector_key_ipv4_addrs *key = GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index e31f5d803c13..9a25c05aa571 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -209,9 +209,7 @@ struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) { struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev); - struct bnxt_vf_rep_stats *rx_stats; - rx_stats = &vf_rep->rx_stats; vf_rep->rx_stats.bytes += skb->len; vf_rep->rx_stats.packets++; @@ -523,7 +521,8 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) return 0; } -int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(devlink); int rc = 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h index 38b9a75ad724..d7287651422f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -30,7 +30,8 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) bool bnxt_dev_is_vf_rep(struct net_device *dev); int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode); -int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode); +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); #else diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 0584d07c8c33..bf6de02be396 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -63,7 +63,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) tx_buf = &txr->tx_buf_ring[last_tx_cons]; rx_prod = tx_buf->rx_prod; } - bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rx_prod); + bnxt_db_write(bp, &rxr->rx_db, rx_prod); } /* returns the following: diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 4241ae928d4a..35564a8a48f9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -214,7 +214,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) case PHY_INTERFACE_MODE_MII: phy_name = "external MII"; - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); bcmgenet_sys_writel(priv, PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); break; @@ -226,11 +226,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) * capabilities, use that knowledge to also configure the * Reverse MII interface correctly. */ - if ((dev->phydev->supported & PHY_BASIC_FEATURES) == - PHY_BASIC_FEATURES) - port_ctrl = PORT_MODE_EXT_RVMII_25; - else + if (dev->phydev->supported & PHY_1000BT_FEATURES) port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); break; @@ -321,9 +320,12 @@ int bcmgenet_mii_probe(struct net_device *dev) phydev->advertising = phydev->supported; /* The internal PHY has its link interrupts routed to the - * Ethernet MAC ISRs + * Ethernet MAC ISRs. On GENETv5 there is a hardware issue + * that prevents the signaling of link UP interrupts when + * the link operates at 10Mbps, so fallback to polling for + * those versions of GENET. 
*/ - if (priv->internal_phy) + if (priv->internal_phy && !GENET_IS_V5(priv)) dev->phydev->irq = PHY_IGNORE_INTERRUPT; return 0; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index ef4a0c326736..5db9f4158e62 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -156,7 +156,7 @@ enum sbmac_state { (d)->sbdma_dscrtable : (d)->f+1) -#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES) +#define NUMCACHEBLKS(x) DIV_ROUND_UP(x, SMP_CACHE_BYTES) #define SBMAC_MAX_TXDESCR 256 #define SBMAC_MAX_RXDESCR 256 @@ -299,7 +299,7 @@ static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); static uint64_t sbmac_addr2reg(unsigned char *ptr); static irqreturn_t sbmac_intr(int irq, void *dev_instance); -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); static void sbmac_setmulti(struct sbmac_softc *sc); static int sbmac_init(struct platform_device *pldev, long long base); static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); @@ -2028,7 +2028,7 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance) * Return value: * nothing ********************************************************************* */ -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; @@ -2357,21 +2357,11 @@ static int sbmac_mii_probe(struct net_device *dev) } /* Remove any features not supported by the controller */ - phy_dev->supported &= SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_MII | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause; + phy_set_max_speed(phy_dev, SPEED_1000); + phy_support_asym_pause(phy_dev); phy_attached_info(phy_dev); - phy_dev->advertising = phy_dev->supported; - sc->phy_dev = phy_dev; return 0; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index e6f28c7942ab..89295306f161 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1598,7 +1598,7 @@ static int tg3_mdio_init(struct tg3 *tp) phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; - /* fallthru */ + /* fall through */ case PHY_ID_RTL8211C: phydev->interface = PHY_INTERFACE_MODE_RGMII; break; @@ -2122,16 +2122,14 @@ static int tg3_phy_init(struct tg3 *tp) case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - phydev->supported &= (PHY_GBIT_FEATURES | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); + phy_set_max_speed(phydev, SPEED_1000); + phy_support_asym_pause(phydev); break; } - /* fallthru */ + /* fall through */ case PHY_INTERFACE_MODE_MII: - phydev->supported &= (PHY_BASIC_FEATURES | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); + phy_set_max_speed(phydev, SPEED_100); + phy_support_asym_pause(phydev); break; default: phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); @@ -2140,8 +2138,6 @@ static int tg3_phy_init(struct tg3 *tp) tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; - phydev->advertising = 
phydev->supported; - phy_attached_info(phydev); return 0; @@ -5215,7 +5211,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) ap->state = ANEG_STATE_AN_ENABLE; - /* fallthru */ + /* fall through */ case ANEG_STATE_AN_ENABLE: ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); if (ap->flags & MR_AN_ENABLE) { @@ -5245,7 +5241,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ret = ANEG_TIMER_ENAB; ap->state = ANEG_STATE_RESTART; - /* fallthru */ + /* fall through */ case ANEG_STATE_RESTART: delta = ap->cur_time - ap->link_time; if (delta > ANEG_STATE_SETTLE_TIME) @@ -5288,7 +5284,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ap->state = ANEG_STATE_ACK_DETECT; - /* fallthru */ + /* fall through */ case ANEG_STATE_ACK_DETECT: if (ap->ack_match != 0) { if ((ap->rxconfig & ~ANEG_CFG_ACK) == @@ -12496,31 +12492,24 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tg3_warn_mgmt_link_flap(tp); if (tg3_flag(tp, USE_PHYLIB)) { - u32 newadv; struct phy_device *phydev; phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - (epause->rx_pause != epause->tx_pause))) + if (!phy_validate_pause(phydev, epause)) return -EINVAL; tp->link_config.flowctrl = 0; + phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); if (epause->rx_pause) { tp->link_config.flowctrl |= FLOW_CTRL_RX; if (epause->tx_pause) { tp->link_config.flowctrl |= FLOW_CTRL_TX; - newadv = ADVERTISED_Pause; - } else - newadv = ADVERTISED_Pause | - ADVERTISED_Asym_Pause; + } } else if (epause->tx_pause) { tp->link_config.flowctrl |= FLOW_CTRL_TX; - newadv = ADVERTISED_Asym_Pause; - } else - newadv = 0; + } if (epause->autoneg) tg3_flag_set(tp, PAUSE_AUTONEG); @@ -12528,33 +12517,19 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tg3_flag_clear(tp, PAUSE_AUTONEG); if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - u32 oldadv = phydev->advertising & - (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (oldadv != newadv) { - phydev->advertising &= - ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - phydev->advertising |= newadv; - if (phydev->autoneg) { - /* - * Always renegotiate the link to - * inform our link partner of our - * flow control settings, even if the - * flow control is forced. Let - * tg3_adjust_link() do the final - * flow control setup. - */ - return phy_start_aneg(phydev); - } + if (phydev->autoneg) { + /* phy_set_asym_pause() will + * renegotiate the link to inform our + * link partner of our flow control + * settings, even if the flow control + * is forced. Let tg3_adjust_link() + * do the final flow control setup. 
+ */ + return 0; } if (!epause->autoneg) tg3_setup_flow_control(tp, 0, 0); - } else { - tp->link_config.advertising &= - ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - tp->link_config.advertising |= newadv; } } else { int irq_sync = 0; @@ -14013,7 +13988,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = tp->phy_addr; - /* fallthru */ + /* fall through */ case SIOCGMIIREG: { u32 mii_regval; diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index bba81735ce87..6d2d4527357c 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1797,7 +1797,7 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna, /* A separate queue to allow synchronous setting of a list of MACs */ INIT_LIST_HEAD(&ucam_mod->del_q); - for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) + for (; i < (bna->ioceth.attr.num_ucmac * 2); i++) list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q); ucam_mod->bna = bna; @@ -1832,7 +1832,7 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna, /* A separate queue to allow synchronous setting of a list of MACs */ INIT_LIST_HEAD(&mcam_mod->del_q); - for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) + for (; i < (bna->ioceth.attr.num_mcmac * 2); i++) list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q); mcam_mod->bna = bna; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 58b9744c4058..8f5bf9166c11 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -544,14 +544,13 @@ static int macb_mii_probe(struct net_device *dev) /* mask with MAC supported features */ if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) - phydev->supported &= PHY_GBIT_FEATURES; + phy_set_max_speed(phydev, SPEED_1000); else - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) - phydev->supported &= ~SUPPORTED_1000baseT_Half; - - phydev->advertising = phydev->supported; + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); bp->link = 0; bp->speed = 0; @@ -4157,8 +4156,7 @@ static int macb_remove(struct platform_device *pdev) static int __maybe_unused macb_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *netdev = platform_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); netif_carrier_off(netdev); @@ -4180,8 +4178,7 @@ static int __maybe_unused macb_suspend(struct device *dev) static int __maybe_unused macb_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *netdev = platform_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); if (bp->wol & MACB_WOL_ENABLED) { diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index 962bb62933db..fda49404968c 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -616,7 +616,7 @@ static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag) int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) { struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip; 
- u32 rings_per_vf, ring_flag; + u32 rings_per_vf; u64 reg_val; if (octeon_map_pci_barx(oct, 0, 0)) @@ -634,8 +634,6 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; - ring_flag = 0; - cn23xx->conf = oct_get_config_info(oct, LIO_23XX); if (!cn23xx->conf) { dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n", diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 8093c5eafea2..825a28e5b544 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -32,38 +32,6 @@ #define OCTNIC_MAX_SG MAX_SKB_FRAGS /** - * \brief Callback for getting interface configuration - * @param status status of request - * @param buf pointer to resp structure - */ -void lio_if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_context *ctx; - struct liquidio_if_cfg_resp *resp; - - resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", - CVM_CAST64(resp->status)); - WRITE_ONCE(ctx->cond, 1); - - snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", - resp->cfg_info.liquidio_firmware_version); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Delete gather lists * @param lio per-network private data */ @@ -198,14 +166,15 @@ int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) nctrl.ncmd.s.cmd = cmd; nctrl.ncmd.s.param1 = param1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -285,15 +254,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) struct octeon_device *oct = lio->oct_dev; u8 *mac; - if (nctrl->completion && nctrl->response_code) { - /* Signal whoever is interested that the response code from the - * firmware has arrived. 
- */ - WRITE_ONCE(*nctrl->response_code, nctrl->status); - complete(nctrl->completion); - } - - if (nctrl->status) + if (nctrl->sc_status) return; switch (nctrl->ncmd.s.cmd) { @@ -464,56 +425,73 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac) */ } +void octeon_schedule_rxq_oom_work(struct octeon_device *oct, + struct octeon_droq *droq) +{ + struct net_device *netdev = oct->props[0].netdev; + struct lio *lio = GET_LIO(netdev); + struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no]; + + queue_delayed_work(wq->wq, &wq->wk.work, + msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); +} + static void octnet_poll_check_rxq_oom_status(struct work_struct *work) { struct cavium_wk *wk = (struct cavium_wk *)work; struct lio *lio = (struct lio *)wk->ctxptr; struct octeon_device *oct = lio->oct_dev; - struct octeon_droq *droq; - int q, q_no = 0; + int q_no = wk->ctxul; + struct octeon_droq *droq = oct->droq[q_no]; - if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) { - for (q = 0; q < lio->linfo.num_rxpciq; q++) { - q_no = lio->linfo.rxpciq[q].s.q_no; - droq = oct->droq[q_no]; - if (!droq) - continue; - octeon_droq_check_oom(droq); - } - } - queue_delayed_work(lio->rxq_status_wq.wq, - &lio->rxq_status_wq.wk.work, - msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq) + return; + + if (octeon_retry_droq_refill(droq)) + octeon_schedule_rxq_oom_work(oct, droq); } int setup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct cavium_wq *wq; + int q, q_no; - lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status", - WQ_MEM_RECLAIM, 0); - if (!lio->rxq_status_wq.wq) { - dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); - return -ENOMEM; + for (q = 0; q < oct->num_oqs; q++) { + q_no = lio->linfo.rxpciq[q].s.q_no; + wq = &lio->rxq_status_wq[q_no]; + wq->wq = alloc_workqueue("rxq-oom-status", + WQ_MEM_RECLAIM, 0); + if (!wq->wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&wq->wk.work, + octnet_poll_check_rxq_oom_status); + wq->wk.ctxptr = lio; + wq->wk.ctxul = q_no; } - INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work, - octnet_poll_check_rxq_oom_status); - lio->rxq_status_wq.wk.ctxptr = lio; - queue_delayed_work(lio->rxq_status_wq.wq, - &lio->rxq_status_wq.wk.work, - msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); + return 0; } void cleanup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); - - if (lio->rxq_status_wq.wq) { - cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work); - flush_workqueue(lio->rxq_status_wq.wq); - destroy_workqueue(lio->rxq_status_wq.wq); + struct octeon_device *oct = lio->oct_dev; + struct cavium_wq *wq; + int q_no; + + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + wq = &lio->rxq_status_wq[q_no]; + if (wq->wq) { + cancel_delayed_work_sync(&wq->wk.work); + flush_workqueue(wq->wq); + destroy_workqueue(wq->wq); + wq->wq = NULL; + } } } @@ -1218,30 +1196,6 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) return 0; } -static void liquidio_change_mtu_completion(struct octeon_device *oct, - u32 status, void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_context *ctx; - - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - if (status) { - dev_err(&oct->pci_dev->dev, "MTU change failed. 
Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL); - } else { - WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS); - } - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - /** * \brief Net device change_mtu * @param netdev network device @@ -1250,22 +1204,17 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - struct liquidio_if_cfg_context *ctx; struct octeon_soft_command *sc; union octnet_cmd *ncmd; - int ctx_size; int ret = 0; - ctx_size = sizeof(struct liquidio_if_cfg_context); sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size); + octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0); ncmd = (union octnet_cmd *)sc->virtdptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU; @@ -1278,28 +1227,28 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = liquidio_change_mtu_completion; - sc->callback_arg = sc; - sc->wait_time = 100; - ret = octeon_send_soft_command(oct, sc); if (ret == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n"); + octeon_free_soft_command(oct, sc); return -EINVAL; } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR || - ctx->cond == LIO_CHANGE_MTU_FAIL) { - octeon_free_soft_command(oct, sc); + ret = wait_for_sc_completion_timeout(oct, sc, 0); + if (ret) + return ret; + + if (sc->sc_status) { + WRITE_ONCE(sc->caller_is_done, true); return -EINVAL; } netdev->mtu = new_mtu; lio->mtu = new_mtu; - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); return 0; } @@ -1333,8 +1282,6 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)sc->virtrptr; - struct oct_nic_stats_ctrl *ctrl = - (struct oct_nic_stats_ctrl *)sc->ctxptr; struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; @@ -1422,93 +1369,148 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, resp->status = 1; } else { + dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n"); resp->status = -1; } - complete(&ctrl->complete); } -int octnet_get_link_stats(struct net_device *netdev) +static int lio_fetch_vf_stats(struct lio *lio) { - struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; struct octeon_soft_command *sc; - struct oct_nic_stats_ctrl *ctrl; - struct oct_nic_stats_resp *resp; + struct oct_nic_vf_stats_resp *resp; + int retval; /* Alloc soft command */ sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, 0, - sizeof(struct oct_nic_stats_resp), - sizeof(struct octnic_ctrl_pkt)); + sizeof(struct oct_nic_vf_stats_resp), + 0); - if (!sc) - return -ENOMEM; + if (!sc) { + dev_err(&oct_dev->pci_dev->dev, "Soft command 
allocation failed\n"); + retval = -ENOMEM; + goto lio_fetch_vf_stats_exit; + } - resp = (struct oct_nic_stats_resp *)sc->virtrptr; - memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp)); - ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; - memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); - ctrl->netdev = netdev; - init_completion(&ctrl->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; sc->iq_no = lio->linfo.txpciq[0].s.q_no; octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, - OPCODE_NIC_PORT_STATS, 0, 0, 0); - - sc->callback = octnet_nic_stats_callback; - sc->callback_arg = sc; - sc->wait_time = 500; /*in milli seconds*/ + OPCODE_NIC_VF_PORT_STATS, 0, 0, 0); retval = octeon_send_soft_command(oct_dev, sc); if (retval == IQ_SEND_FAILED) { octeon_free_soft_command(oct_dev, sc); - return -EINVAL; + goto lio_fetch_vf_stats_exit; } - wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); + retval = + wait_for_sc_completion_timeout(oct_dev, sc, + (2 * LIO_SC_MAX_TMO_MS)); + if (retval) { + dev_err(&oct_dev->pci_dev->dev, + "sc OPCODE_NIC_VF_PORT_STATS command failed\n"); + goto lio_fetch_vf_stats_exit; + } - if (resp->status != 1) { - octeon_free_soft_command(oct_dev, sc); + if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) { + octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt, + (sizeof(u64)) >> 3); - return -EINVAL; + if (resp->spoofmac_cnt != 0) { + dev_warn(&oct_dev->pci_dev->dev, + "%llu Spoofed packets detected\n", + resp->spoofmac_cnt); + } } + WRITE_ONCE(sc->caller_is_done, 1); - octeon_free_soft_command(oct_dev, sc); - - return 0; +lio_fetch_vf_stats_exit: + return retval; } -static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct, - u32 status, - void *buf) +void lio_fetch_stats(struct work_struct *work) { - struct liquidio_nic_seapi_ctl_context *ctx; - struct octeon_soft_command *sc = buf; + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = wk->ctxptr; + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_soft_command *sc; + struct oct_nic_stats_resp *resp; + unsigned long time_in_jiffies; + int retval; + + if (OCTEON_CN23XX_PF(oct_dev)) { + /* report spoofchk every 2 seconds */ + if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) && + (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) && + oct_dev->sriov_info.num_vfs_alloced) { + lio_fetch_vf_stats(lio); + } - ctx = sc->ctxptr; + oct_dev->vfstats_poll++; + } + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_nic_stats_resp), + 0); + + if (!sc) { + dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n"); + goto lio_fetch_stats_exit; + } + + resp = (struct oct_nic_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; - oct = lio_get_device(ctx->octeon_id); - if (status) { - dev_err(&oct->pci_dev->dev, "%s: instruction failed. 
Status: %llx\n", - __func__, - CVM_CAST64(status)); + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_PORT_STATS, 0, 0, 0); + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + goto lio_fetch_stats_exit; + } + + retval = wait_for_sc_completion_timeout(oct_dev, sc, + (2 * LIO_SC_MAX_TMO_MS)); + if (retval) { + dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n"); + goto lio_fetch_stats_exit; } - ctx->status = status; - complete(&ctx->complete); + + octnet_nic_stats_callback(oct_dev, sc->sc_status, sc); + WRITE_ONCE(sc->caller_is_done, true); + +lio_fetch_stats_exit: + time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS); + if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) + schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies); + + return; } int liquidio_set_speed(struct lio *lio, int speed) { - struct liquidio_nic_seapi_ctl_context *ctx; struct octeon_device *oct = lio->oct_dev; struct oct_nic_seapi_resp *resp; struct octeon_soft_command *sc; union octnet_cmd *ncmd; - u32 ctx_size; int retval; u32 var; @@ -1521,21 +1523,18 @@ int liquidio_set_speed(struct lio *lio, int speed) return -EOPNOTSUPP; } - ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, sizeof(struct oct_nic_seapi_resp), - ctx_size); + 0); if (!sc) return -ENOMEM; ncmd = sc->virtdptr; - ctx = sc->ctxptr; resp = sc->virtrptr; memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); - ctx->octeon_id = lio_get_device_id(oct); - ctx->status = 0; - init_completion(&ctx->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = SEAPI_CMD_SPEED_SET; @@ -1548,30 +1547,24 @@ int liquidio_set_speed(struct lio *lio, int speed) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_UBOOT_CTL, 0, 0, 0); - sc->callback = liquidio_nic_seapi_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; - retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + octeon_free_soft_command(oct, sc); retval = -EBUSY; } else { /* Wait for response or timeout */ - if (wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies(10000)) == 0) { - dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", - __func__); - octeon_free_soft_command(oct, sc); - return -EINTR; - } + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n", __func__, retval); - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); + return -EIO; } @@ -1583,38 +1576,32 @@ int liquidio_set_speed(struct lio *lio, int speed) } oct->speed_setting = var; + WRITE_ONCE(sc->caller_is_done, true); } - octeon_free_soft_command(oct, sc); - return retval; } int liquidio_get_speed(struct lio *lio) { - struct liquidio_nic_seapi_ctl_context *ctx; struct octeon_device *oct = lio->oct_dev; struct oct_nic_seapi_resp *resp; struct octeon_soft_command *sc; union octnet_cmd *ncmd; - u32 ctx_size; int retval; - ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, sizeof(struct oct_nic_seapi_resp), - ctx_size); + 0); if (!sc) return -ENOMEM; ncmd = sc->virtdptr; - ctx = sc->ctxptr; resp = sc->virtrptr; memset(resp, 0, sizeof(struct 
oct_nic_seapi_resp)); - ctx->octeon_id = lio_get_device_id(oct); - ctx->status = 0; - init_completion(&ctx->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = SEAPI_CMD_SPEED_GET; @@ -1626,37 +1613,20 @@ int liquidio_get_speed(struct lio *lio) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_UBOOT_CTL, 0, 0, 0); - sc->callback = liquidio_nic_seapi_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; - retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); - oct->no_speed_setting = 1; - oct->speed_setting = 25; - - retval = -EBUSY; + octeon_free_soft_command(oct, sc); + retval = -EIO; } else { - if (wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies(10000)) == 0) { - dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", - __func__); - - oct->speed_setting = 25; - oct->no_speed_setting = 1; - - octeon_free_soft_command(oct, sc); + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; - return -EINTR; - } retval = resp->status; if (retval) { dev_err(&oct->pci_dev->dev, "%s failed retval=%d\n", __func__, retval); - oct->no_speed_setting = 1; - oct->speed_setting = 25; - octeon_free_soft_command(oct, sc); retval = -EIO; } else { u32 var; @@ -1664,16 +1634,171 @@ int liquidio_get_speed(struct lio *lio) var = be32_to_cpu((__force __be32)resp->speed); oct->speed_setting = var; if (var == 0xffff) { - oct->no_speed_setting = 1; /* unable to access boot variables * get the default value based on the NIC type */ - oct->speed_setting = 25; + if (oct->subsystem_id == + OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == + OCTEON_CN2360_25GB_SUBSYS_ID) { + oct->no_speed_setting = 1; + oct->speed_setting = 25; + } else { + oct->speed_setting = 10; + } } + } + WRITE_ONCE(sc->caller_is_done, true); + } + + return retval; +} + +int liquidio_set_fec(struct lio *lio, int on_off) +{ + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + struct octeon_device *oct; + union octnet_cmd *ncmd; + int retval; + u32 var; + + oct = lio->oct_dev; + + if (oct->props[lio->ifidx].fec == on_off) + return 0; + + if (!OCTEON_CN23XX_PF(oct)) { + dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n", + __func__); + return -1; + } + + if (oct->speed_boot != 25) { + dev_err(&oct->pci_dev->dev, + "Set FEC only when link speed is 25G during insmod\n"); + return -1; + } + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), 0); + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_FEC_SET; + ncmd->s.param1 = on_off; + /* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */ + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + octeon_free_soft_command(oct, sc); + return -EIO; + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (-EIO); + + var = be32_to_cpu(resp->fec_setting); + resp->fec_setting = var; + if (var != on_off) { + dev_err(&oct->pci_dev->dev, + "Setting failed fec= %x, expect 
%x\n", + var, on_off); + oct->props[lio->ifidx].fec = var; + if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS) + oct->props[lio->ifidx].fec = 1; + else + oct->props[lio->ifidx].fec = 0; + } + + WRITE_ONCE(sc->caller_is_done, true); + + if (oct->props[lio->ifidx].fec != + oct->props[lio->ifidx].fec_boot) { + dev_dbg(&oct->pci_dev->dev, + "Reload driver to change fec to %s\n", + oct->props[lio->ifidx].fec ? "on" : "off"); + } + + return retval; +} + +int liquidio_get_fec(struct lio *lio) +{ + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + struct octeon_device *oct; + union octnet_cmd *ncmd; + int retval; + u32 var; + + oct = lio->oct_dev; + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), 0); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_FEC_GET; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, + "%s: Failed to send soft command\n", __func__); + octeon_free_soft_command(oct, sc); + return -EIO; } - octeon_free_soft_command(oct, sc); + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + var = be32_to_cpu(resp->fec_setting); + resp->fec_setting = var; + if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS) + oct->props[lio->ifidx].fec = 1; + else + oct->props[lio->ifidx].fec = 0; + + WRITE_ONCE(sc->caller_is_done, true); + + if (oct->props[lio->ifidx].fec != + oct->props[lio->ifidx].fec_boot) { + dev_dbg(&oct->pci_dev->dev, + "Reload driver to change fec to %s\n", + oct->props[lio->ifidx].fec ? 
"on" : "off"); + } return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 8e05afd5e39c..4c3925af53bc 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -33,25 +33,12 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); -struct oct_intrmod_context { - int octeon_id; - wait_queue_head_t wc; - int cond; - int status; -}; - struct oct_intrmod_resp { u64 rh; struct oct_intrmod_cfg intrmod; u64 status; }; -struct oct_mdio_cmd_context { - int octeon_id; - wait_queue_head_t wc; - int cond; -}; - struct oct_mdio_cmd_resp { u64 rh; struct oct_mdio_cmd resp; @@ -257,6 +244,7 @@ static int lio_get_link_ksettings(struct net_device *netdev, linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || linfo->link.s.if_mode == INTERFACE_MODE_XFI) { dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); + ecmd->base.transceiver = XCVR_EXTERNAL; } else { dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", linfo->link.s.if_mode); @@ -290,10 +278,12 @@ static int lio_get_link_ksettings(struct net_device *netdev, 10000baseCR_Full); } - if (oct->no_speed_setting == 0) + if (oct->no_speed_setting == 0) { liquidio_get_speed(lio); - else + liquidio_get_fec(lio); + } else { oct->speed_setting = 25; + } if (oct->speed_setting == 10) { ethtool_link_ksettings_add_link_mode @@ -317,6 +307,24 @@ static int lio_get_link_ksettings(struct net_device *netdev, (ecmd, advertising, 25000baseCR_Full); } + + if (oct->no_speed_setting) + break; + + ethtool_link_ksettings_add_link_mode + (ecmd, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, FEC_NONE); + /*FEC_OFF*/ + if (oct->props[lio->ifidx].fec == 1) { + /* ETHTOOL_FEC_RS */ + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, FEC_RS); + } else { + /* ETHTOOL_FEC_OFF */ + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, FEC_NONE); + } } else { /* VF */ if (linfo->link.s.speed == 10000) { ethtool_link_ksettings_add_link_mode @@ -472,12 +480,11 @@ lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues) nctrl.ncmd.s.param1 = num_queues; nctrl.ncmd.s.param2 = num_queues; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n", ret); return -1; @@ -708,13 +715,13 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val) nctrl.ncmd.s.param1 = addr; nctrl.ncmd.s.param2 = val; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to configure gpio value, ret=%d\n", ret); return -EINVAL; } @@ -734,41 +741,19 @@ static int octnet_id_active(struct net_device *netdev, int val) nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE; nctrl.ncmd.s.param1 = val; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - 
dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to configure gpio value, ret=%d\n", ret); return -EINVAL; } return 0; } -/* Callback for when mdio command response arrives - */ -static void octnet_mdio_resp_callback(struct octeon_device *oct, - u32 status, - void *buf) -{ - struct oct_mdio_cmd_context *mdio_cmd_ctx; - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - - mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; - - oct = lio_get_device(mdio_cmd_ctx->octeon_id); - if (status) { - dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(mdio_cmd_ctx->cond, -1); - } else { - WRITE_ONCE(mdio_cmd_ctx->cond, 1); - } - wake_up_interruptible(&mdio_cmd_ctx->wc); -} - /* This routine provides PHY access routines for * mdio clause45 . */ @@ -778,25 +763,20 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) struct octeon_device *oct_dev = lio->oct_dev; struct octeon_soft_command *sc; struct oct_mdio_cmd_resp *mdio_cmd_rsp; - struct oct_mdio_cmd_context *mdio_cmd_ctx; struct oct_mdio_cmd *mdio_cmd; int retval = 0; sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, sizeof(struct oct_mdio_cmd), - sizeof(struct oct_mdio_cmd_resp), - sizeof(struct oct_mdio_cmd_context)); + sizeof(struct oct_mdio_cmd_resp), 0); if (!sc) return -ENOMEM; - mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; - WRITE_ONCE(mdio_cmd_ctx->cond, 0); - mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); mdio_cmd->op = op; mdio_cmd->mdio_addr = loc; if (op) @@ -808,42 +788,40 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 0, 0, 0); - sc->wait_time = 1000; - sc->callback = octnet_mdio_resp_callback; - sc->callback_arg = sc; - - init_waitqueue_head(&mdio_cmd_ctx->wc); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct_dev, sc); - if (retval == IQ_SEND_FAILED) { dev_err(&oct_dev->pci_dev->dev, "octnet_mdio45_access instruction failed status: %x\n", retval); - retval = -EBUSY; + octeon_free_soft_command(oct_dev, sc); + return -EBUSY; } else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived */ - sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond); + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return retval; + retval = mdio_cmd_rsp->status; if (retval) { - dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n"); - retval = -EBUSY; - } else { - octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), - sizeof(struct oct_mdio_cmd) / 8); - - if (READ_ONCE(mdio_cmd_ctx->cond) == 1) { - if (!op) - *value = mdio_cmd_rsp->resp.value1; - } else { - retval = -EINVAL; - } + dev_err(&oct_dev->pci_dev->dev, + "octnet mdio45 access failed: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -EBUSY; } - } - octeon_free_soft_command(oct_dev, sc); + octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), + sizeof(struct oct_mdio_cmd) / 8); + + if (!op) + *value = mdio_cmd_rsp->resp.value1; + + WRITE_ONCE(sc->caller_is_done, true); + } return retval; } @@ -1007,8 +985,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev, static int lio_23xx_reconfigure_queue_count(struct lio *lio) { struct octeon_device *oct = lio->oct_dev; - 
struct liquidio_if_cfg_context *ctx; - u32 resp_size, ctx_size, data_size; + u32 resp_size, data_size; struct liquidio_if_cfg_resp *resp; struct octeon_soft_command *sc; union oct_nic_if_cfg if_cfg; @@ -1018,11 +995,10 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) int j; resp_size = sizeof(struct liquidio_if_cfg_resp); - ctx_size = sizeof(struct liquidio_if_cfg_context); data_size = sizeof(struct lio_version); sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, data_size, - resp_size, ctx_size); + resp_size, 0); if (!sc) { dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", __func__); @@ -1030,7 +1006,6 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) } resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); @@ -1038,9 +1013,6 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); ifidx_or_pfnum = oct->pf_num; - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); if_cfg.u64 = 0; if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings; @@ -1052,27 +1024,29 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_QCOUNT_UPDATE, 0, if_cfg.u64, 0); - sc->callback = lio_if_cfg_callback; - sc->callback_arg = sc; - sc->wait_time = LIO_IFCFG_WAIT_TIME; + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { dev_err(&oct->pci_dev->dev, - "iq/oq config failed status: %x\n", + "Sending iq/oq config failed status: %x\n", retval); - goto qcount_update_fail; + octeon_free_soft_command(oct, sc); + return -EIO; } - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&oct->pci_dev->dev, "Wait interrupted\n"); - return -1; - } + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { - dev_err(&oct->pci_dev->dev, "iq/oq config failed\n"); - goto qcount_update_fail; + dev_err(&oct->pci_dev->dev, + "iq/oq config failed: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -1; } octeon_swap_8B_data((u64 *)(&resp->cfg_info), @@ -1097,16 +1071,12 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) lio->txq = lio->linfo.txpciq[0].s.q_no; lio->rxq = lio->linfo.rxpciq[0].s.q_no; - octeon_free_soft_command(oct, sc); dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", lio->linfo.num_rxpciq); - return 0; - -qcount_update_fail: - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); - return -1; + return 0; } static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) @@ -1166,6 +1136,8 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) * steps like updating sriov_info for the octeon device need to be done. 
*/ if (queue_count_update) { + cleanup_rx_oom_poll_fn(netdev); + lio_delete_glists(lio); /* Delete mbox for PF which is SRIOV disabled because sriov_info @@ -1265,6 +1237,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) return -1; } + if (setup_rx_oom_poll_fn(netdev)) { + dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n"); + return 1; + } + /* Send firmware the information about new number of queues * if the interface is a VF or a PF that is SRIOV enabled. */ @@ -1412,7 +1389,6 @@ lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; @@ -1433,8 +1409,9 @@ lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) } ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n"); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to set pause parameter, ret=%d\n", ret); return -EINVAL; } @@ -1764,7 +1741,8 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, */ data[i++] = lstats.rx_dropped; /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ - data[i++] = lstats.tx_dropped; + data[i++] = lstats.tx_dropped + + oct_dev->link_stats.fromhost.fw_err_drop; data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; @@ -2013,34 +1991,11 @@ static int lio_vf_get_sset_count(struct net_device *netdev, int sset) } } -/* Callback function for intrmod */ -static void octnet_intrmod_callback(struct octeon_device *oct_dev, - u32 status, - void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct oct_intrmod_context *ctx; - - ctx = (struct oct_intrmod_context *)sc->ctxptr; - - ctx->status = status; - - WRITE_ONCE(ctx->cond, 1); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - /* get interrupt moderation parameters */ static int octnet_get_intrmod_cfg(struct lio *lio, struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; - struct oct_intrmod_context *ctx; struct oct_intrmod_resp *resp; int retval; struct octeon_device *oct_dev = lio->oct_dev; @@ -2049,8 +2004,7 @@ static int octnet_get_intrmod_cfg(struct lio *lio, sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, 0, - sizeof(struct oct_intrmod_resp), - sizeof(struct oct_intrmod_context)); + sizeof(struct oct_intrmod_resp), 0); if (!sc) return -ENOMEM; @@ -2058,20 +2012,13 @@ static int octnet_get_intrmod_cfg(struct lio *lio, resp = (struct oct_intrmod_resp *)sc->virtrptr; memset(resp, 0, sizeof(struct oct_intrmod_resp)); - ctx = (struct oct_intrmod_context *)sc->ctxptr; - memset(ctx, 0, sizeof(struct oct_intrmod_context)); - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct_dev); - init_waitqueue_head(&ctx->wc); - sc->iq_no = lio->linfo.txpciq[0].s.q_no; octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); - sc->callback = octnet_intrmod_callback; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -2082,32 +2029,23 
@@ static int octnet_get_intrmod_cfg(struct lio *lio, /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n"); - goto intrmod_info_wait_intr; - } + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return -ENODEV; - retval = ctx->status || resp->status; - if (retval) { + if (resp->status) { dev_err(&oct_dev->pci_dev->dev, "Get interrupt moderation parameters failed\n"); - goto intrmod_info_wait_fail; + WRITE_ONCE(sc->caller_is_done, true); + return -ENODEV; } octeon_swap_8B_data((u64 *)&resp->intrmod, (sizeof(struct oct_intrmod_cfg)) / 8); memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg)); - octeon_free_soft_command(oct_dev, sc); + WRITE_ONCE(sc->caller_is_done, true); return 0; - -intrmod_info_wait_fail: - - octeon_free_soft_command(oct_dev, sc); - -intrmod_info_wait_intr: - - return -ENODEV; } /* Configure interrupt moderation parameters */ @@ -2115,7 +2053,6 @@ static int octnet_set_intrmod_cfg(struct lio *lio, struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; - struct oct_intrmod_context *ctx; struct oct_intrmod_cfg *cfg; int retval; struct octeon_device *oct_dev = lio->oct_dev; @@ -2124,18 +2061,11 @@ static int octnet_set_intrmod_cfg(struct lio *lio, sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, sizeof(struct oct_intrmod_cfg), - 0, - sizeof(struct oct_intrmod_context)); + 16, 0); if (!sc) return -ENOMEM; - ctx = (struct oct_intrmod_context *)sc->ctxptr; - - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct_dev); - init_waitqueue_head(&ctx->wc); - cfg = (struct oct_intrmod_cfg *)sc->virtdptr; memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg)); @@ -2146,9 +2076,8 @@ static int octnet_set_intrmod_cfg(struct lio *lio, octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); - sc->callback = octnet_intrmod_callback; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -2159,26 +2088,24 @@ static int octnet_set_intrmod_cfg(struct lio *lio, /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ - if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) { - retval = ctx->status; - if (retval) - dev_err(&oct_dev->pci_dev->dev, - "intrmod config failed. Status: %llx\n", - CVM_CAST64(retval)); - else - dev_info(&oct_dev->pci_dev->dev, - "Rx-Adaptive Interrupt moderation %s\n", - (intr_cfg->rx_enable) ? - "enabled" : "disabled"); - - octeon_free_soft_command(oct_dev, sc); - - return ((retval) ? -ENODEV : 0); + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return retval; + + retval = sc->sc_status; + if (retval == 0) { + dev_info(&oct_dev->pci_dev->dev, + "Rx-Adaptive Interrupt moderation %s\n", + (intr_cfg->rx_enable) ? + "enabled" : "disabled"); + WRITE_ONCE(sc->caller_is_done, true); + return 0; } - dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n"); - - return -EINTR; + dev_err(&oct_dev->pci_dev->dev, + "intrmod config failed. 
Status: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -ENODEV; } static int lio_get_intr_coalesce(struct net_device *netdev, @@ -3123,9 +3050,60 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags) return 0; } +static int lio_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + fec->active_fec = ETHTOOL_FEC_NONE; + fec->fec = ETHTOOL_FEC_NONE; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (oct->no_speed_setting == 1) + return 0; + + liquidio_get_fec(lio); + fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF); + if (oct->props[lio->ifidx].fec == 1) + fec->active_fec = ETHTOOL_FEC_RS; + else + fec->active_fec = ETHTOOL_FEC_OFF; + } + + return 0; +} + +static int lio_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (oct->no_speed_setting == 1) + return -EOPNOTSUPP; + + if (fec->fec & ETHTOOL_FEC_OFF) + liquidio_set_fec(lio, 0); + else if (fec->fec & ETHTOOL_FEC_RS) + liquidio_set_fec(lio, 1); + else + return -EOPNOTSUPP; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + static const struct ethtool_ops lio_ethtool_ops = { .get_link_ksettings = lio_get_link_ksettings, .set_link_ksettings = lio_set_link_ksettings, + .get_fecparam = lio_get_fecparam, + .set_fecparam = lio_set_fecparam, .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 6fb13fa73b27..3d24133e5e49 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -99,14 +99,6 @@ struct lio_trusted_vf_ctx { int status; }; -struct liquidio_rx_ctl_context { - int octeon_id; - - wait_queue_head_t wc; - - int cond; -}; - struct oct_link_status_resp { u64 rh; struct oct_link_info link_info; @@ -642,26 +634,6 @@ static inline void update_link_status(struct net_device *netdev, } /** - * lio_sync_octeon_time_cb - callback that is invoked when soft command - * sent by lio_sync_octeon_time() has completed successfully or failed - * - * @oct - octeon device structure - * @status - indicates success or failure - * @buf - pointer to the command that was sent to firmware - **/ -static void lio_sync_octeon_time_cb(struct octeon_device *oct, - u32 status, void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - - if (status) - dev_err(&oct->pci_dev->dev, - "Failed to sync time to octeon; error=%d\n", status); - - octeon_free_soft_command(oct, sc); -} - -/** * lio_sync_octeon_time - send latest localtime to octeon firmware so that * firmware will correct it's time, in case there is a time skew * @@ -677,7 +649,7 @@ static void lio_sync_octeon_time(struct work_struct *work) struct lio_time *lt; int ret; - sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0); + sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0); if (!sc) { dev_err(&oct->pci_dev->dev, "Failed to sync time to octeon: soft command allocation failed\n"); @@ -696,15 +668,16 @@ static void lio_sync_octeon_time(struct work_struct *work) octeon_prepare_soft_command(oct, sc, 
OPCODE_NIC, OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0); - sc->callback = lio_sync_octeon_time_cb; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ret = octeon_send_soft_command(oct, sc); if (ret == IQ_SEND_FAILED) { dev_err(&oct->pci_dev->dev, "Failed to sync time to octeon: failed to send soft command\n"); octeon_free_soft_command(oct, sc); + } else { + WRITE_ONCE(sc->caller_is_done, true); } queue_delayed_work(lio->sync_octeon_time_wq.wq, @@ -1037,12 +1010,12 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_IO_QUEUES_DONE: - if (wait_for_pending_requests(oct)) - dev_err(&oct->pci_dev->dev, "There were pending requests\n"); - if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + /* Disable the input and output queues now. No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish. @@ -1052,6 +1025,31 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (lio_wait_for_oq_pkts(oct)) dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); + /* Force all requests waiting to be fetched by OCTEON to + * complete. + */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + lio_process_ordered_list(oct, 1); + octeon_free_sc_done_list(oct); + octeon_free_sc_zombie_list(oct); + /* fallthrough */ case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ @@ -1178,34 +1176,6 @@ static void octeon_destroy_resources(struct octeon_device *oct) } /** - * \brief Callback for rx ctrl - * @param status status of request - * @param buf pointer to resp structure - */ -static void rx_ctl_callback(struct octeon_device *oct, - u32 status, - void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_rx_ctl_context *ctx; - - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (status) - dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. 
Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(ctx->cond, 1); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Send Rx control command * @param lio per-network private data * @param start_stop whether to start or stop @@ -1213,9 +1183,7 @@ static void rx_ctl_callback(struct octeon_device *oct, static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct octeon_soft_command *sc; - struct liquidio_rx_ctl_context *ctx; union octnet_cmd *ncmd; - int ctx_size = sizeof(struct liquidio_rx_ctl_context); struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; int retval; @@ -1224,14 +1192,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, - 16, ctx_size); + 16, 0); ncmd = (union octnet_cmd *)sc->virtdptr; - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_RX_CTL; @@ -1244,23 +1207,25 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = rx_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); + octeon_free_soft_command(oct, sc); + return; } else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) return; + oct->props[lio->ifidx].rx_on = start_stop; + WRITE_ONCE(sc->caller_is_done, true); } - - octeon_free_soft_command(oct, sc); } /** @@ -1274,8 +1239,10 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct net_device *netdev = oct->props[ifidx].netdev; - struct lio *lio; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; + struct lio *lio; if (!netdev) { dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", @@ -1304,6 +1271,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) netif_napi_del(napi); + tasklet_enable(&oct_priv->droq_tasklet); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -1840,9 +1809,13 @@ static int liquidio_open(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; if (oct->props[lio->ifidx].napi_enabled == 0) { + tasklet_disable(&oct_priv->droq_tasklet); + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_enable(napi); @@ -1876,6 +1849,12 @@ static int liquidio_open(struct net_device *netdev) /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); + /* start periodical statistics fetch */ + INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); + lio->stats_wk.ctxptr = lio; + schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies + (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); + dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); @@ -1890,6 +1869,8 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; ifstate_reset(lio, LIO_IFSTATE_RUNNING); @@ -1916,6 +1897,8 @@ static int liquidio_stop(struct net_device *netdev) cleanup_tx_poll_fn(netdev); } + cancel_delayed_work_sync(&lio->stats_wk.work); + if (lio->ptp_clock) { ptp_clock_unregister(lio->ptp_clock); lio->ptp_clock = NULL; @@ -1934,6 +1917,8 @@ static int liquidio_stop(struct net_device *netdev) if (OCTEON_CN23XX_PF(oct)) oct->droq[0]->ops.poll_mode = 0; + + tasklet_enable(&oct_priv->droq_tasklet); } dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); @@ -2014,10 +1999,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev) /* Apparently, any activity in this call from the kernel has to * be atomic. So we won't wait for response. */ - nctrl.wait_time = 0; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", ret); } @@ -2046,8 +2030,6 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nctrl.wait_time = 100; nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. 
*/ @@ -2058,6 +2040,14 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); return -ENOMEM; } + + if (nctrl.sc_status) { + dev_err(&oct->pci_dev->dev, + "%s: MAC Address change failed. sc return=%x\n", + __func__, nctrl.sc_status); + return -EIO; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); @@ -2111,7 +2101,6 @@ liquidio_get_stats64(struct net_device *netdev, lstats->rx_packets = pkts; lstats->rx_dropped = drop; - octnet_get_link_stats(netdev); lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; lstats->collisions = oct->link_stats.fromhost.total_collisions; @@ -2324,7 +2313,7 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, * @returns whether the packet was transmitted to the device okay or not * (NETDEV_TX_OK or NETDEV_TX_BUSY) */ -static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) { struct lio *lio; struct octnet_buf_free_info *finfo; @@ -2598,14 +2587,15 @@ static int liquidio_vlan_rx_add_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; @@ -2626,14 +2616,15 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -2659,15 +2650,16 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -2695,15 +2687,16 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, nctrl.ncmd.s.more = vxlan_cmd_bit; nctrl.ncmd.s.param1 = vxlan_port; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "VxLAN port add/delete failed in core (ret:0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -2826,6 +2819,7 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; + int ret = 0; if (!is_valid_ether_addr(mac)) return -EINVAL; @@ -2839,12 +2833,13 @@ static int 
__liquidio_set_vf_mac(struct net_device *netdev, int vfidx, nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; /* vfidx is 0 based, but vf_num (param1) is 1 based */ nctrl.ncmd.s.param1 = vfidx + 1; - nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0); nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nctrl.wait_time = LIO_CMD_WAIT_TM; + if (is_admin_assigned) { + nctrl.ncmd.s.param2 = true; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + } nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. */ @@ -2852,9 +2847,11 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; - octnet_send_nic_ctrl_pkt(oct, &nctrl); + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); + if (ret > 0) + ret = -EIO; - return 0; + return ret; } static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) @@ -2873,6 +2870,62 @@ static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) return retval; } +static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, + bool enable) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int retval; + + if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { + netif_info(lio, drv, lio->netdev, + "firmware does not support spoofchk\n"); + return -EOPNOTSUPP; + } + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { + netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); + return -EINVAL; + } + + if (enable) { + if (oct->sriov_info.vf_spoofchk[vfidx]) + return 0; + } else { + /* Clear */ + if (!oct->sriov_info.vf_spoofchk[vfidx]) + return 0; + } + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; + nctrl.ncmd.s.param1 = + vfidx + 1; /* vfidx is 0 based, + * but vf_num (param1) is 1 based + */ + nctrl.ncmd.s.param2 = enable; + nctrl.ncmd.s.more = 0; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.cb_fn = 0; + + retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); + + if (retval) { + netif_info(lio, drv, lio->netdev, + "Failed to set VF %d spoofchk %s\n", vfidx, + enable ? "on" : "off"); + return -1; + } + + oct->sriov_info.vf_spoofchk[vfidx] = enable; + netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, + enable ? 
"on" : "off"); + + return 0; +} + static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, u16 vlan, u8 qos, __be16 vlan_proto) { @@ -2880,6 +2933,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; u16 vlantci; + int ret = 0; if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) return -EINVAL; @@ -2911,13 +2965,17 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.cb_fn = NULL; - nctrl.wait_time = LIO_CMD_WAIT_TM; - octnet_send_nic_ctrl_pkt(oct, &nctrl); + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); + if (ret) { + if (ret > 0) + ret = -EIO; + return ret; + } oct->sriov_info.vf_vlantci[vfidx] = vlantci; - return 0; + return ret; } static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, @@ -2930,6 +2988,8 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) return -EINVAL; + memset(ivi, 0, sizeof(struct ifla_vf_info)); + ivi->vf = vfidx; macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; ether_addr_copy(&ivi->mac[0], macaddr); @@ -2941,33 +3001,22 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, else ivi->trusted = false; ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; - return 0; -} - -static void trusted_vf_callback(struct octeon_device *oct_dev, - u32 status, void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct lio_trusted_vf_ctx *ctx; + ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; + ivi->max_tx_rate = lio->linfo.link.s.speed; + ivi->min_tx_rate = 0; - ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr; - ctx->status = status; - - complete(&ctx->complete); + return 0; } static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) { struct octeon_device *oct = lio->oct_dev; - struct lio_trusted_vf_ctx *ctx; struct octeon_soft_command *sc; - int ctx_size, retval; - - ctx_size = sizeof(struct lio_trusted_vf_ctx); - sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size); + int retval; - ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr; - init_completion(&ctx->complete); + sc = octeon_alloc_soft_command(oct, 0, 16, 0); + if (!sc) + return -ENOMEM; sc->iq_no = lio->linfo.txpciq[0].s.q_no; @@ -2976,23 +3025,21 @@ static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, trusted); - sc->callback = trusted_vf_callback; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct, sc); retval = -1; } else { /* Wait for response or timeout */ - if (wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies(2000))) - retval = ctx->status; - else - retval = -1; - } + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (retval); - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); + } return retval; } @@ -3055,6 +3102,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; + int ret = 0; if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) return -EINVAL; @@ -3070,13 +3118,15 @@ static int 
liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.cb_fn = NULL; - nctrl.wait_time = LIO_CMD_WAIT_TM; - octnet_send_nic_ctrl_pkt(oct, &nctrl); + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); - oct->sriov_info.vf_linkstate[vfidx] = linkstate; + if (!ret) + oct->sriov_info.vf_linkstate[vfidx] = linkstate; + else if (ret > 0) + ret = -EIO; - return 0; + return ret; } static int @@ -3094,7 +3144,8 @@ liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) } static int -liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode) +liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct lio_devlink_priv *priv; struct octeon_device *oct; @@ -3204,6 +3255,7 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_mac = liquidio_set_vf_mac, .ndo_set_vf_vlan = liquidio_set_vf_vlan, .ndo_get_vf_config = liquidio_get_vf_config, + .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, .ndo_set_vf_trust = liquidio_set_vf_trust, .ndo_set_vf_link_state = liquidio_set_vf_link_state, .ndo_get_vf_stats = liquidio_get_vf_stats, @@ -3307,7 +3359,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) unsigned long micro; u32 cur_ver; struct octeon_soft_command *sc; - struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; struct octdev_props *props; int retval, num_iqueues, num_oqueues; @@ -3315,7 +3366,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) union oct_nic_if_cfg if_cfg; unsigned int base_queue; unsigned int gmx_port_id; - u32 resp_size, ctx_size, data_size; + u32 resp_size, data_size; u32 ifidx_or_pfnum; struct lio_version *vdata; struct devlink *devlink; @@ -3340,13 +3391,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (i = 0; i < octeon_dev->ifcount; i++) { resp_size = sizeof(struct liquidio_if_cfg_resp); - ctx_size = sizeof(struct liquidio_if_cfg_context); data_size = sizeof(struct lio_version); sc = (struct octeon_soft_command *) octeon_alloc_soft_command(octeon_dev, data_size, - resp_size, ctx_size); + resp_size, 0); resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; *((u64 *)vdata) = 0; @@ -3376,9 +3425,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "requesting config for interface %d, iqs %d, oqs %d\n", ifidx_or_pfnum, num_iqueues, num_oqueues); - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(octeon_dev); - init_waitqueue_head(&ctx->wc); if_cfg.u64 = 0; if_cfg.s.num_iqueues = num_iqueues; @@ -3392,9 +3438,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = lio_if_cfg_callback; - sc->callback_arg = sc; - sc->wait_time = LIO_IFCFG_WAIT_TIME; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -3402,22 +3447,26 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */ - goto setup_nic_dev_fail; + octeon_free_soft_command(octeon_dev, sc); + return(-EIO); } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); - goto setup_nic_wait_intr; - } + retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } + snprintf(octeon_dev->fw_info.liquidio_firmware_version, + 32, "%s", + resp->cfg_info.liquidio_firmware_version); /* Verify f/w version (in case of 'auto' loading from flash) */ fw_ver = octeon_dev->fw_info.liquidio_firmware_version; @@ -3427,7 +3476,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n", LIQUIDIO_BASE_VERSION, fw_ver); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } else if (atomic_read(octeon_dev->adapter_fw_state) == FW_IS_PRELOADED) { dev_info(&octeon_dev->pci_dev->dev, @@ -3454,7 +3504,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", resp->cfg_info.iqmask, resp->cfg_info.oqmask); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } if (OCTEON_CN6XXX(octeon_dev)) { @@ -3473,7 +3524,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (!netdev) { dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); @@ -3488,14 +3540,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (retval) { dev_err(&octeon_dev->pci_dev->dev, "setting real number rx failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_free; } retval = netif_set_real_num_tx_queues(netdev, num_iqueues); if (retval) { dev_err(&octeon_dev->pci_dev->dev, "setting real number tx failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_free; } lio = GET_LIO(netdev); @@ -3522,6 +3576,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + WRITE_ONCE(sc->caller_is_done, true); + lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); if (OCTEON_CN23XX_PF(octeon_dev) || @@ -3588,7 +3644,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Error setting VF%d MAC address\n", j); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } } @@ -3610,7 +3666,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.num_txpciq, lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); @@ -3621,7 +3677,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } /* Register ethtool support */ @@ -3643,20 +3699,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OCTNET_CMD_VERBOSE_ENABLE, 0); if (setup_link_status_change_wq(netdev)) 
- goto setup_nic_dev_fail; + goto setup_nic_dev_free; if ((octeon_dev->fw_info.app_cap_flags & LIQUIDIO_TIME_SYNC_CAP) && setup_sync_octeon_time_wq(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; if (setup_rx_oom_poll_fn(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; /* Register the network device with the OS */ if (register_netdev(netdev)) { dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } dev_dbg(&octeon_dev->pci_dev->dev, @@ -3679,8 +3735,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup successful\n", i); - octeon_free_soft_command(octeon_dev, sc); - if (octeon_dev->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || octeon_dev->subsystem_id == @@ -3709,13 +3763,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) } octeon_dev->speed_boot = octeon_dev->speed_setting; + /* don't read FEC setting if unsupported by f/w (see above) */ + if (octeon_dev->speed_boot == 25 && + !octeon_dev->no_speed_setting) { + liquidio_get_fec(lio); + octeon_dev->props[lio->ifidx].fec_boot = + octeon_dev->props[lio->ifidx].fec; + } } devlink = devlink_alloc(&liquidio_devlink_ops, sizeof(struct lio_devlink_priv)); if (!devlink) { dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); - goto setup_nic_wait_intr; + goto setup_nic_dev_free; } lio_devlink = devlink_priv(devlink); @@ -3725,7 +3786,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) devlink_free(devlink); dev_err(&octeon_dev->pci_dev->dev, "devlink registration failed\n"); - goto setup_nic_wait_intr; + goto setup_nic_dev_free; } octeon_dev->devlink = devlink; @@ -3733,17 +3794,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) return 0; -setup_nic_dev_fail: - - octeon_free_soft_command(octeon_dev, sc); - -setup_nic_wait_intr: +setup_nic_dev_free: while (i--) { dev_err(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup failed\n", i); liquidio_destroy_nic_device(octeon_dev, i); } + +setup_nic_dev_done: + return -ENODEV; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index b77835724dc8..54b245797d2e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -40,14 +40,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) -struct liquidio_rx_ctl_context { - int octeon_id; - - wait_queue_head_t wc; - - int cond; -}; - struct oct_timestamp_resp { u64 rh; u64 timestamp; @@ -452,6 +444,8 @@ static void octeon_pci_flr(struct octeon_device *oct) */ static void octeon_destroy_resources(struct octeon_device *oct) { + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct msix_entry *msix_entries; int i; @@ -471,12 +465,12 @@ static void octeon_destroy_resources(struct octeon_device *oct) case OCT_DEV_HOST_OK: /* fallthrough */ case OCT_DEV_IO_QUEUES_DONE: - if (wait_for_pending_requests(oct)) - dev_err(&oct->pci_dev->dev, "There were pending requests\n"); - if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + /* Disable the input and output queues now. 
No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish. @@ -485,7 +479,33 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (lio_wait_for_oq_pkts(oct)) dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); - /* fall through */ + + /* Force all requests waiting to be fetched by OCTEON to + * complete. + */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + lio_process_ordered_list(oct, 1); + octeon_free_sc_done_list(oct); + octeon_free_sc_zombie_list(oct); + + /* fall through */ case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); @@ -569,33 +589,8 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* Nothing to be done here either */ break; } -} - -/** - * \brief Callback for rx ctrl - * @param status status of request - * @param buf pointer to resp structure - */ -static void rx_ctl_callback(struct octeon_device *oct, - u32 status, void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_rx_ctl_context *ctx; - - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - oct = lio_get_device(ctx->octeon_id); - if (status) - dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(ctx->cond, 1); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); + tasklet_kill(&oct_priv->droq_tasklet); } /** @@ -606,8 +601,6 @@ static void rx_ctl_callback(struct octeon_device *oct, static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; - int ctx_size = sizeof(struct liquidio_rx_ctl_context); - struct liquidio_rx_ctl_context *ctx; struct octeon_soft_command *sc; union octnet_cmd *ncmd; int retval; @@ -617,14 +610,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, - 16, ctx_size); + 16, 0); ncmd = (union octnet_cmd *)sc->virtdptr; - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_RX_CTL; @@ -637,23 +625,24 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = rx_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); + octeon_free_soft_command(oct, sc); } else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
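[Aside, not part of the patch] The send_rx_ctrl_cmd() hunk here shows the conversion applied throughout the series: the per-request context structure and completion callback are dropped, the caller waits on a completion embedded in the soft command, and ownership is handed back by setting caller_is_done. A condensed sketch of the resulting call shape, using names from the patch with error handling trimmed:

static int lio_send_sc_and_wait(struct octeon_device *oct,
				struct octeon_soft_command *sc)
{
	int ret;

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct, sc);	/* never queued; caller frees */
		return -EIO;
	}

	ret = wait_for_sc_completion_timeout(oct, sc, 0);
	if (ret)
		return ret;	/* helper already marked caller_is_done */

	/* ... consume sc->virtrptr / sc->sc_status here ... */
	WRITE_ONCE(sc->caller_is_done, true);	/* response manager frees sc */
	return 0;
}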
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) return; + oct->props[lio->ifidx].rx_on = start_stop; + WRITE_ONCE(sc->caller_is_done, true); } - - octeon_free_soft_command(oct, sc); } /** @@ -667,6 +656,8 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct net_device *netdev = oct->props[ifidx].netdev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; struct lio *lio; @@ -696,6 +687,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) netif_napi_del(napi); + tasklet_enable(&oct_priv->droq_tasklet); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -913,9 +906,13 @@ static int liquidio_open(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; if (!oct->props[lio->ifidx].napi_enabled) { + tasklet_disable(&oct_priv->droq_tasklet); + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_enable(napi); @@ -932,6 +929,11 @@ static int liquidio_open(struct net_device *netdev) netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); start_txqs(netdev); + INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); + lio->stats_wk.ctxptr = lio; + schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies + (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); + /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -948,6 +950,8 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; /* tell Octeon to stop forwarding packets to host */ @@ -977,8 +981,12 @@ static int liquidio_stop(struct net_device *netdev) oct->props[lio->ifidx].napi_enabled = 0; oct->droq[0]->ops.poll_mode = 0; + + tasklet_enable(&oct_priv->droq_tasklet); } + cancel_delayed_work_sync(&lio->stats_wk.work); + dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); return 0; @@ -1093,10 +1101,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev) /* Apparently, any activity in this call from the kernel has to * be atomic. So we won't wait for response. */ - nctrl.wait_time = 0; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", ret); } @@ -1133,8 +1140,6 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nctrl.wait_time = 100; nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. 
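[Aside, not part of the patch] The statistics handling above also changes shape: instead of querying firmware synchronously from ndo_get_stats64, liquidio_open() now schedules lio_fetch_stats() as delayed work every LIQUIDIO_NDEV_STATS_POLL_TIME_MS and liquidio_stop() cancels it with cancel_delayed_work_sync(). A generic sketch of that self-rearming poller; names other than the interval are invented:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define STATS_POLL_MS	200	/* mirrors LIQUIDIO_NDEV_STATS_POLL_TIME_MS */

static void stats_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... pull counters from firmware into a cached copy ... */

	/* re-arm; ndo_get_stats64 only reads the cached values */
	schedule_delayed_work(dwork, msecs_to_jiffies(STATS_POLL_MS));
}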
*/ @@ -1145,6 +1150,13 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); return -ENOMEM; } + + if (nctrl.sc_status == + FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) { + dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n"); + return -EPERM; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); @@ -1198,7 +1210,6 @@ liquidio_get_stats64(struct net_device *netdev, lstats->rx_packets = pkts; lstats->rx_dropped = drop; - octnet_get_link_stats(netdev); lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; /* detailed rx_errors: */ @@ -1390,7 +1401,7 @@ static int send_nic_timestamp_pkt(struct octeon_device *oct, * @returns whether the packet was transmitted to the device okay or not * (NETDEV_TX_OK or NETDEV_TX_BUSY) */ -static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octnet_buf_free_info *finfo; union octnic_cmd_setup cmdsetup; @@ -1638,8 +1649,6 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct completion compl; - u16 response_code; int ret = 0; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); @@ -1648,26 +1657,15 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - init_completion(&compl); - nctrl.completion = &compl; - nctrl.response_code = &response_code; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); - return -EIO; - } - - if (!wait_for_completion_timeout(&compl, - msecs_to_jiffies(nctrl.wait_time))) - return -EPERM; - - if (READ_ONCE(response_code)) return -EPERM; + } return 0; } @@ -1687,14 +1685,15 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -1720,14 +1719,15 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -1755,15 +1755,16 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, nctrl.ncmd.s.more = vxlan_cmd_bit; nctrl.ncmd.s.param1 = vxlan_port; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = 
octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -1924,8 +1925,7 @@ nic_info_err: static int setup_nic_devices(struct octeon_device *octeon_dev) { int retval, num_iqueues, num_oqueues; - struct liquidio_if_cfg_context *ctx; - u32 resp_size, ctx_size, data_size; + u32 resp_size, data_size; struct liquidio_if_cfg_resp *resp; struct octeon_soft_command *sc; union oct_nic_if_cfg if_cfg; @@ -1956,13 +1956,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (i = 0; i < octeon_dev->ifcount; i++) { resp_size = sizeof(struct liquidio_if_cfg_resp); - ctx_size = sizeof(struct liquidio_if_cfg_context); data_size = sizeof(struct lio_version); sc = (struct octeon_soft_command *) octeon_alloc_soft_command(octeon_dev, data_size, - resp_size, ctx_size); + resp_size, 0); resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; *((u64 *)vdata) = 0; @@ -1970,10 +1968,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(octeon_dev); - init_waitqueue_head(&ctx->wc); - if_cfg.u64 = 0; if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; @@ -1986,32 +1980,37 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = lio_if_cfg_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) { dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */ - goto setup_nic_dev_fail; + octeon_free_soft_command(octeon_dev, sc); + return(-EIO); } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); - goto setup_nic_wait_intr; - } + retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { - dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); - goto setup_nic_dev_fail; + dev_err(&octeon_dev->pci_dev->dev, + "iq/oq config failed, retval = %d\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -EIO; } + snprintf(octeon_dev->fw_info.liquidio_firmware_version, + 32, "%s", + resp->cfg_info.liquidio_firmware_version); + octeon_swap_8B_data((u64 *)(&resp->cfg_info), (sizeof(struct liquidio_if_cfg_info)) >> 3); @@ -2022,7 +2021,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", resp->cfg_info.iqmask, resp->cfg_info.oqmask); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } dev_dbg(&octeon_dev->pci_dev->dev, "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", @@ -2033,7 +2033,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (!netdev) { dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); @@ -2070,6 +2071,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; lio->linfo.macaddr_is_admin_asgnd = resp->cfg_info.linfo.macaddr_is_admin_asgnd; + lio->linfo.macaddr_spoofchk = + resp->cfg_info.linfo.macaddr_spoofchk; lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); @@ -2109,6 +2112,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->min_mtu = LIO_MIN_MTU_SIZE; netdev->max_mtu = LIO_MAX_MTU_SIZE; + WRITE_ONCE(sc->caller_is_done, true); + /* Point to the properties for octeon device to which this * interface belongs. 
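[Aside, not part of the patch] One ordering detail worth spelling out for the setup_nic_devices() hunks above: the response buffer lives inside the soft command, so everything needed from resp must be copied out before caller_is_done is set, because after that point the response manager may free the buffer at any time. A minimal sketch of the copy-before-release rule; the function name is invented:

static u64 lio_copy_link_then_release(struct octeon_soft_command *sc)
{
	struct liquidio_if_cfg_resp *resp = sc->virtrptr;
	u64 link = resp->cfg_info.linfo.link.u64;	/* copy out first */

	WRITE_ONCE(sc->caller_is_done, true);		/* sc may now be freed */
	return link;
}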
*/ @@ -2132,7 +2137,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.num_txpciq, lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); @@ -2155,7 +2160,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } /* Register ethtool support */ @@ -2170,15 +2175,15 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OCTNIC_LROIPV4 | OCTNIC_LROIPV6); if (setup_link_status_change_wq(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; if (setup_rx_oom_poll_fn(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; /* Register the network device with the OS */ if (register_netdev(netdev)) { dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } dev_dbg(&octeon_dev->pci_dev->dev, @@ -2201,24 +2206,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup successful\n", i); - octeon_free_soft_command(octeon_dev, sc); - octeon_dev->no_speed_setting = 1; } return 0; -setup_nic_dev_fail: - - octeon_free_soft_command(octeon_dev, sc); - -setup_nic_wait_intr: +setup_nic_dev_free: while (i--) { dev_err(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup failed\n", i); liquidio_destroy_nic_device(octeon_dev, i); } + +setup_nic_dev_done: + return -ENODEV; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index ddd7431579f4..ea9859e028d4 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -27,11 +27,11 @@ #include "octeon_network.h" #include <net/switchdev.h> #include "lio_vf_rep.h" -#include "octeon_network.h" static int lio_vf_rep_open(struct net_device *ndev); static int lio_vf_rep_stop(struct net_device *ndev); -static int lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev); +static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb, + struct net_device *ndev); static void lio_vf_rep_tx_timeout(struct net_device *netdev); static int lio_vf_rep_phys_port_name(struct net_device *dev, char *buf, size_t len); @@ -49,44 +49,25 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = { .ndo_change_mtu = lio_vf_rep_change_mtu, }; -static void -lio_vf_rep_send_sc_complete(struct octeon_device *oct, - u32 status, void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct lio_vf_rep_sc_ctx *ctx = - (struct lio_vf_rep_sc_ctx *)sc->ctxptr; - struct lio_vf_rep_resp *resp = - (struct lio_vf_rep_resp *)sc->virtrptr; - - if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status)) - WRITE_ONCE(resp->status, 0); - - complete(&ctx->complete); -} - static int lio_vf_rep_send_soft_command(struct octeon_device *oct, void *req, int req_size, void *resp, int resp_size) { int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size; - int ctx_size = sizeof(struct lio_vf_rep_sc_ctx); struct octeon_soft_command *sc = NULL; struct lio_vf_rep_resp *rep_resp; - struct lio_vf_rep_sc_ctx *ctx; void *sc_req; int err; sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, req_size, - tot_resp_size, ctx_size); + tot_resp_size, 
0); if (!sc) return -ENOMEM; - ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr; - memset(ctx, 0, ctx_size); - init_completion(&ctx->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; sc_req = (struct lio_vf_rep_req *)sc->virtdptr; memcpy(sc_req, req, req_size); @@ -98,23 +79,24 @@ lio_vf_rep_send_soft_command(struct octeon_device *oct, sc->iq_no = 0; octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_CMD, 0, 0, 0); - sc->callback = lio_vf_rep_send_sc_complete; - sc->callback_arg = sc; - sc->wait_time = LIO_VF_REP_REQ_TMO_MS; err = octeon_send_soft_command(oct, sc); if (err == IQ_SEND_FAILED) goto free_buff; - wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies - (2 * LIO_VF_REP_REQ_TMO_MS)); + err = wait_for_sc_completion_timeout(oct, sc, 0); + if (err) + return err; + err = READ_ONCE(rep_resp->status) ? -EBUSY : 0; if (err) dev_err(&oct->pci_dev->dev, "VF rep send config failed\n"); - - if (resp) + else if (resp) memcpy(resp, (rep_resp + 1), resp_size); + + WRITE_ONCE(sc->caller_is_done, true); + return err; + free_buff: octeon_free_soft_command(oct, sc); @@ -380,7 +362,7 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct, netif_wake_queue(ndev); } -static int +static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) { struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); @@ -404,7 +386,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) } sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct, 0, 0, 0); + octeon_alloc_soft_command(oct, 0, 16, 0); if (!sc) { dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n"); goto xmit_failed; @@ -413,6 +395,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) /* Multiple buffers are not used for vf_rep packets. */ if (skb_shinfo(skb)->nr_frags != 0) { dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. 
Dropping packet\n"); + octeon_free_soft_command(oct, sc); goto xmit_failed; } @@ -420,6 +403,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) { dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n"); + octeon_free_soft_command(oct, sc); goto xmit_failed; } @@ -440,6 +424,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) if (status == IQ_SEND_FAILED) { dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, sc->datasize, DMA_TO_DEVICE); + octeon_free_soft_command(oct, sc); goto xmit_failed; } diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 7407fcd338e9..a5e0e9f17959 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -118,6 +118,10 @@ enum octeon_tag_type { /* App specific capabilities from firmware to pf driver */ #define LIQUIDIO_TIME_SYNC_CAP 0x1 #define LIQUIDIO_SWITCHDEV_CAP 0x2 +#define LIQUIDIO_SPOOFCHK_CAP 0x4 + +/* error status return from firmware */ +#define OCTEON_REQUEST_NO_PERMISSION 0xc static inline u32 incr_index(u32 index, u32 count, u32 max) { @@ -241,6 +245,10 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_QUEUE_COUNT_CTL 0x1f +#define OCTNET_CMD_GROUP1 1 +#define OCTNET_CMD_SET_VF_SPOOFCHK 0x1 +#define OCTNET_GROUP1_LAST_CMD OCTNET_CMD_SET_VF_SPOOFCHK + #define OCTNET_CMD_VXLAN_PORT_ADD 0x0 #define OCTNET_CMD_VXLAN_PORT_DEL 0x1 #define OCTNET_CMD_RXCSUM_ENABLE 0x0 @@ -250,9 +258,18 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1 #define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0 +#define OCTNET_CMD_FAIL 0x1 + +#define SEAPI_CMD_FEC_SET 0x0 +#define SEAPI_CMD_FEC_SET_DISABLE 0x0 +#define SEAPI_CMD_FEC_SET_RS 0x1 +#define SEAPI_CMD_FEC_GET 0x1 + #define SEAPI_CMD_SPEED_SET 0x2 #define SEAPI_CMD_SPEED_GET 0x3 +#define OPCODE_NIC_VF_PORT_STATS 0x22 + #define LIO_CMD_WAIT_TM 100 /* RX(packets coming from wire) Checksum verification flags */ @@ -301,7 +318,8 @@ union octnet_cmd { u64 more:6; /* How many udd words follow the command */ - u64 reserved:29; + u64 cmdgroup:8; + u64 reserved:21; u64 param1:16; @@ -313,7 +331,8 @@ union octnet_cmd { u64 param1:16; - u64 reserved:29; + u64 reserved:21; + u64 cmdgroup:8; u64 more:6; @@ -757,13 +776,17 @@ struct oct_link_info { #ifdef __BIG_ENDIAN_BITFIELD u64 gmxport:16; u64 macaddr_is_admin_asgnd:1; - u64 rsvd:31; + u64 rsvd:13; + u64 macaddr_spoofchk:1; + u64 rsvd1:17; u64 num_txpciq:8; u64 num_rxpciq:8; #else u64 num_rxpciq:8; u64 num_txpciq:8; - u64 rsvd:31; + u64 rsvd1:17; + u64 macaddr_spoofchk:1; + u64 rsvd:13; u64 macaddr_is_admin_asgnd:1; u64 gmxport:16; #endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index ceac74388e09..24c212001212 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -438,9 +438,10 @@ struct octeon_config { #define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE) /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking + * 1 process done list, 1 zombie lists(timeouted sc list) * NoResponse Lists are now maintained with each IQ. (Dec' 2007). */ -#define MAX_RESPONSE_LISTS 4 +#define MAX_RESPONSE_LISTS 6 /* Opcode hash bits. 
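[Aside, not part of the patch] The union octnet_cmd change above carves an 8-bit cmdgroup field out of the former reserved:29 space, mirrored for both bit-field endiannesses so the 64-bit wire layout is unchanged. A standalone sketch of the same technique; the field names and widths here are invented and only need to sum to 64 bits:

#include <linux/types.h>
#include <asm/byteorder.h>

union example_cmd {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u64 cmd:5;
		u64 more:6;
		u64 cmdgroup:8;		/* new field taken from reserved space */
		u64 reserved:21;
		u64 param1:16;
		u64 param2:8;
#else					/* little endian: same fields, reversed */
		u64 param2:8;
		u64 param1:16;
		u64 reserved:21;
		u64 cmdgroup:8;
		u64 more:6;
		u64 cmd:5;
#endif
	} s;
};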
The opcode is hashed on the lower 6-bits to lookup the * dispatch table. diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index f878a552fef3..ce8c3f818666 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1044,8 +1044,7 @@ void octeon_delete_dispatch_list(struct octeon_device *oct) dispatch = &oct->dispatch.dlist[i].list; while (dispatch->next != dispatch) { temp = dispatch->next; - list_del(temp); - list_add_tail(temp, &freelist); + list_move_tail(temp, &freelist); } oct->dispatch.dlist[i].opcode = 0; @@ -1440,18 +1439,15 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) /* the whole thing needs to be atomic, ideally */ if (droq) { pkts_pend = (u32)atomic_read(&droq->pkts_pending); - spin_lock_bh(&droq->lock); writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg); droq->pkt_count = pkts_pend; - /* this write needs to be flushed before we release the lock */ - mmiowb(); - spin_unlock_bh(&droq->lock); oct = droq->oct_dev; } if (iq) { spin_lock_bh(&iq->lock); - writel(iq->pkt_in_done, iq->inst_cnt_reg); - iq->pkt_in_done = 0; + writel(iq->pkts_processed, iq->inst_cnt_reg); + iq->pkt_in_done -= iq->pkts_processed; + iq->pkts_processed = 0; /* this write needs to be flushed before we release the lock */ mmiowb(); spin_unlock_bh(&iq->lock); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index d99ca6ba23a4..3d01d3602d8f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -316,6 +316,8 @@ struct octdev_props { * device pointer (used for OS specific calls). */ int rx_on; + int fec; + int fec_boot; int napi_enabled; int gmxport; struct net_device *netdev; @@ -397,6 +399,8 @@ struct octeon_sriov_info { int vf_linkstate[MAX_POSSIBLE_VFS]; + bool vf_spoofchk[MAX_POSSIBLE_VFS]; + u64 vf_drv_loaded_mask; }; @@ -607,6 +611,9 @@ struct octeon_device { u8 speed_boot; u8 speed_setting; u8 no_speed_setting; + + u32 vfstats_poll; +#define LIO_VFSTATS_POLL 10 }; #define OCT_DRV_ONLINE 1 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index a71dbb7ab6af..a0c099f71524 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -301,8 +301,6 @@ int octeon_init_droq(struct octeon_device *oct, dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n", droq->max_empty_descs); - spin_lock_init(&droq->lock); - INIT_LIST_HEAD(&droq->dispatch_list); /* For 56xx Pass1, this function won't be called, so no checks. */ @@ -333,8 +331,6 @@ init_droq_fail: * Returns: * Success: Pointer to recv_info_t * Failure: NULL. - * Locks: - * The droq->lock is held when this routine is called. */ static inline struct octeon_recv_info *octeon_create_recv_info( struct octeon_device *octeon_dev, @@ -433,8 +429,6 @@ octeon_droq_refill_pullup_descs(struct octeon_droq *droq, * up buffers (that were not dispatched) to form a contiguous ring. * Returns: * No of descriptors refilled. - * Locks: - * This routine is called with droq->lock held. 
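[Aside, not part of the patch] The dispatch-list cleanup above also replaces an open-coded list_del()/list_add_tail() pair with list_move_tail(), the canonical single-call form:

#include <linux/list.h>

static void move_entry(struct list_head *entry, struct list_head *dest)
{
	/* equivalent to: list_del(entry); list_add_tail(entry, dest); */
	list_move_tail(entry, dest);
}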
*/ static u32 octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) @@ -449,8 +443,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) while (droq->refill_count && (desc_refilled < droq->max_count)) { /* If a valid buffer exists (happens if there is no dispatch), - * reuse - * the buffer, else allocate. + * reuse the buffer, else allocate. */ if (!droq->recv_buf_list[droq->refill_idx].buffer) { pg_info = @@ -503,34 +496,37 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) /** check if we can allocate packets to get out of oom. * @param droq - Droq being checked. - * @return does not return anything + * @return 1 if fails to refill minimum */ -void octeon_droq_check_oom(struct octeon_droq *droq) +int octeon_retry_droq_refill(struct octeon_droq *droq) { - int desc_refilled; struct octeon_device *oct = droq->oct_dev; + int desc_refilled, reschedule = 1; + u32 pkts_credit; + + pkts_credit = readl(droq->pkts_credit_reg); + desc_refilled = octeon_droq_refill(oct, droq); + if (desc_refilled) { + /* Flush the droq descriptor data to memory to be sure + * that when we update the credits the data in memory + * is accurate. + */ + wmb(); + writel(desc_refilled, droq->pkts_credit_reg); + /* make sure mmio write completes */ + mmiowb(); - if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) { - spin_lock_bh(&droq->lock); - desc_refilled = octeon_droq_refill(oct, droq); - if (desc_refilled) { - /* Flush the droq descriptor data to memory to be sure - * that when we update the credits the data in memory - * is accurate. - */ - wmb(); - writel(desc_refilled, droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); - } - spin_unlock_bh(&droq->lock); + if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP) + reschedule = 0; } + + return reschedule; } static inline u32 octeon_droq_get_bufcount(u32 buf_size, u32 total_len) { - return ((total_len + buf_size - 1) / buf_size); + return DIV_ROUND_UP(total_len, buf_size); } static int @@ -603,9 +599,9 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, struct octeon_droq *droq, u32 pkts_to_process) { + u32 pkt, total_len = 0, pkt_count, retval; struct octeon_droq_info *info; union octeon_rh *rh; - u32 pkt, total_len = 0, pkt_count; pkt_count = pkts_to_process; @@ -709,30 +705,43 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, if (droq->refill_count >= droq->refill_threshold) { int desc_refilled = octeon_droq_refill(oct, droq); - /* Flush the droq descriptor data to memory to be sure - * that when we update the credits the data in memory - * is accurate. - */ - wmb(); - writel((desc_refilled), droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); + if (desc_refilled) { + /* Flush the droq descriptor data to memory to + * be sure that when we update the credits the + * data in memory is accurate. + */ + wmb(); + writel(desc_refilled, droq->pkts_credit_reg); + /* make sure mmio write completes */ + mmiowb(); + } } - } /* for (each packet)... */ /* Increment refill_count by the number of buffers processed. 
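[Aside, not part of the patch] In the same area, the open-coded ceiling division in octeon_droq_get_bufcount() becomes DIV_ROUND_UP(); the two spellings are equivalent for the unsigned values used here:

#include <linux/kernel.h>
#include <linux/types.h>

static u32 droq_bufcount(u32 buf_size, u32 total_len)
{
	/* same result as (total_len + buf_size - 1) / buf_size */
	return DIV_ROUND_UP(total_len, buf_size);
}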
*/ droq->stats.pkts_received += pkt; droq->stats.bytes_received += total_len; + retval = pkt; if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) { octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt)); droq->stats.dropped_toomany += (pkts_to_process - pkt); - return pkts_to_process; + retval = pkts_to_process; + } + + atomic_sub(retval, &droq->pkts_pending); + + if (droq->refill_count >= droq->refill_threshold && + readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) { + octeon_droq_check_hw_for_pkts(droq); + + /* Make sure there are no pkts_pending */ + if (!atomic_read(&droq->pkts_pending)) + octeon_schedule_rxq_oom_work(oct, droq); } - return pkt; + return retval; } int @@ -740,29 +749,19 @@ octeon_droq_process_packets(struct octeon_device *oct, struct octeon_droq *droq, u32 budget) { - u32 pkt_count = 0, pkts_processed = 0; + u32 pkt_count = 0; struct list_head *tmp, *tmp2; - /* Grab the droq lock */ - spin_lock(&droq->lock); - octeon_droq_check_hw_for_pkts(droq); pkt_count = atomic_read(&droq->pkts_pending); - if (!pkt_count) { - spin_unlock(&droq->lock); + if (!pkt_count) return 0; - } if (pkt_count > budget) pkt_count = budget; - pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count); - - atomic_sub(pkts_processed, &droq->pkts_pending); - - /* Release the spin lock */ - spin_unlock(&droq->lock); + octeon_droq_fast_process_packets(oct, droq, pkt_count); list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { struct __dispatch *rdisp = (struct __dispatch *)tmp; @@ -798,8 +797,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, if (budget > droq->max_count) budget = droq->max_count; - spin_lock(&droq->lock); - while (total_pkts_processed < budget) { octeon_droq_check_hw_for_pkts(droq); @@ -813,13 +810,9 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, octeon_droq_fast_process_packets(oct, droq, pkts_available); - atomic_sub(pkts_processed, &droq->pkts_pending); - total_pkts_processed += pkts_processed; } - spin_unlock(&droq->lock); - list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { struct __dispatch *rdisp = (struct __dispatch *)tmp; @@ -879,9 +872,8 @@ octeon_enable_irq(struct octeon_device *oct, u32 q_no) int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, struct octeon_droq_ops *ops) { - struct octeon_droq *droq; - unsigned long flags; struct octeon_config *oct_cfg = NULL; + struct octeon_droq *droq; oct_cfg = octeon_get_conf(oct); @@ -901,21 +893,15 @@ int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, } droq = oct->droq[q_no]; - - spin_lock_irqsave(&droq->lock, flags); - memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops)); - spin_unlock_irqrestore(&droq->lock, flags); - return 0; } int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) { - unsigned long flags; - struct octeon_droq *droq; struct octeon_config *oct_cfg = NULL; + struct octeon_droq *droq; oct_cfg = octeon_get_conf(oct); @@ -936,14 +922,10 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) return 0; } - spin_lock_irqsave(&droq->lock, flags); - droq->ops.fptr = NULL; droq->ops.farg = NULL; droq->ops.drop_on_max = 0; - spin_unlock_irqrestore(&droq->lock, flags); - return 0; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index f28f262d4ab6..c9b19e624dce 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -245,9 +245,6 @@ struct octeon_droq_ops { * Octeon DROQ. 
*/ struct octeon_droq { - /** A spinlock to protect access to this ring. */ - spinlock_t lock; - u32 q_no; u32 pkt_count; @@ -414,6 +411,6 @@ int octeon_droq_process_poll_pkts(struct octeon_device *oct, int octeon_enable_irq(struct octeon_device *oct, u32 q_no); -void octeon_droq_check_oom(struct octeon_droq *droq); +int octeon_retry_droq_refill(struct octeon_droq *droq); #endif /*__OCTEON_DROQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 2327062e8af6..bebf3bd349c6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -94,6 +94,8 @@ struct octeon_instr_queue { u32 pkt_in_done; + u32 pkts_processed; + /** A spinlock to protect access to the input ring.*/ spinlock_t iq_flush_running_lock; @@ -290,13 +292,19 @@ struct octeon_soft_command { u32 ctxsize; /** Time out and callback */ - size_t wait_time; - size_t timeout; + size_t expiry_time; u32 iq_no; void (*callback)(struct octeon_device *, u32, void *); void *callback_arg; + + int caller_is_done; + u32 sc_status; + struct completion complete; }; +/* max timeout (in milli sec) for soft request */ +#define LIO_SC_MAX_TMO_MS 60000 + /** Maximum number of buffers to allocate into soft command buffer pool */ #define MAX_SOFT_COMMAND_BUFFERS 256 @@ -317,6 +325,8 @@ struct octeon_sc_buffer_pool { (((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count) int octeon_setup_sc_buffer_pool(struct octeon_device *oct); +int octeon_free_sc_done_list(struct octeon_device *oct); +int octeon_free_sc_zombie_list(struct octeon_device *oct); int octeon_free_sc_buffer_pool(struct octeon_device *oct); struct octeon_soft_command * octeon_alloc_soft_command(struct octeon_device *oct, @@ -368,6 +378,9 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no, u32 force_db, void *cmd, void *buf, u32 datasize, u32 reqtype); +void octeon_dump_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc); + void octeon_prepare_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc, u8 opcode, u8 subcode, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index c846eec11a45..073d0647b439 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -70,6 +70,10 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype, void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, unsigned int bytes_compl); void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac); + +void octeon_schedule_rxq_oom_work(struct octeon_device *oct, + struct octeon_droq *droq); + /** Swap 8B blocks */ static inline void octeon_swap_8B_data(u64 *data, u32 blocks) { @@ -146,46 +150,70 @@ err_release_region: return 1; } +/* input parameter: + * sc: pointer to a soft request + * timeout: milli sec which an application wants to wait for the + response of the request. + * 0: the request will wait until its response gets back + * from the firmware within LIO_SC_MAX_TMO_MS milli sec. + * It the response does not return within + * LIO_SC_MAX_TMO_MS milli sec, lio_process_ordered_list() + * will move the request to zombie response list. + * + * return value: + * 0: got the response from firmware for the sc request. + * errno -EINTR: user abort the command. + * errno -ETIME: user spefified timeout value has been expired. 
+ * errno -EBUSY: the response of the request does not return in + * resonable time (LIO_SC_MAX_TMO_MS). + * the sc wll be move to zombie response list by + * lio_process_ordered_list() + * + * A request with non-zero return value, the sc->caller_is_done + * will be marked 1. + * When getting a request with zero return value, the requestor + * should mark sc->caller_is_done with 1 after examing the + * response of sc. + * lio_process_ordered_list() will free the soft command on behalf + * of the soft command requestor. + * This is to fix the possible race condition of both timeout process + * and lio_process_ordered_list()/callback function to free a + * sc strucutre. + */ static inline int -sleep_cond(wait_queue_head_t *wait_queue, int *condition) +wait_for_sc_completion_timeout(struct octeon_device *oct_dev, + struct octeon_soft_command *sc, + unsigned long timeout) { int errno = 0; - wait_queue_entry_t we; - - init_waitqueue_entry(&we, current); - add_wait_queue(wait_queue, &we); - while (!(READ_ONCE(*condition))) { - set_current_state(TASK_INTERRUPTIBLE); - if (signal_pending(current)) { - errno = -EINTR; - goto out; - } - schedule(); + long timeout_jiff; + + if (timeout) + timeout_jiff = msecs_to_jiffies(timeout); + else + timeout_jiff = MAX_SCHEDULE_TIMEOUT; + + timeout_jiff = + wait_for_completion_interruptible_timeout(&sc->complete, + timeout_jiff); + if (timeout_jiff == 0) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc is timeout\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -ETIME; + } else if (timeout_jiff == -ERESTARTSYS) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -EINTR; + } else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -EBUSY; } -out: - set_current_state(TASK_RUNNING); - remove_wait_queue(wait_queue, &we); - return errno; -} -/* Gives up the CPU for a timeout period. - * Check that the condition is not true before we go to sleep for a - * timeout period. - */ -static inline void -sleep_timeout_cond(wait_queue_head_t *wait_queue, - int *condition, - int timeout) -{ - wait_queue_entry_t we; - - init_waitqueue_entry(&we, current); - add_wait_queue(wait_queue, &we); - set_current_state(TASK_INTERRUPTIBLE); - if (!(*condition)) - schedule_timeout(timeout); - set_current_state(TASK_RUNNING); - remove_wait_queue(wait_queue, &we); + return errno; } #ifndef ROUNDUP4 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index d7a3916fe877..50201fc86dcf 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -35,12 +35,6 @@ #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 #define LIO_IFSTATE_RESETTING 0x10 -struct liquidio_if_cfg_context { - u32 octeon_id; - wait_queue_head_t wc; - int cond; -}; - struct liquidio_if_cfg_resp { u64 rh; struct liquidio_if_cfg_info cfg_info; @@ -48,6 +42,7 @@ struct liquidio_if_cfg_resp { }; #define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */ +#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200 /* Structure of a node in list of gather components maintained by * NIC driver for each network device. 
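[Aside, not part of the patch] wait_for_sc_completion_timeout(), added above, collapses the old sleep_cond()/sleep_timeout_cond() pair into one call built on wait_for_completion_interruptible_timeout(), whose return value encodes all three outcomes; on any error the real helper also sets caller_is_done so the response manager will free the command. A condensed sketch of the mapping, with logging omitted and the wrapper name invented:

static int sc_wait_outcome(struct octeon_soft_command *sc,
			   unsigned long timeout_jiff)
{
	long t = wait_for_completion_interruptible_timeout(&sc->complete,
							   timeout_jiff);

	if (t == 0)
		return -ETIME;		/* caller-supplied timeout expired */
	if (t == -ERESTARTSYS)
		return -EINTR;		/* interrupted by a signal */
	if (sc->sc_status == OCTEON_REQUEST_TIMEOUT)
		return -EBUSY;		/* response manager hit LIO_SC_MAX_TMO_MS */
	return 0;			/* firmware responded */
}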
@@ -76,6 +71,12 @@ struct oct_nic_stats_resp { u64 status; }; +struct oct_nic_vf_stats_resp { + u64 rh; + u64 spoofmac_cnt; + u64 status; +}; + struct oct_nic_stats_ctrl { struct completion complete; struct net_device *netdev; @@ -83,16 +84,13 @@ struct oct_nic_stats_ctrl { struct oct_nic_seapi_resp { u64 rh; - u32 speed; + union { + u32 fec_setting; + u32 speed; + }; u64 status; }; -struct liquidio_nic_seapi_ctl_context { - int octeon_id; - u32 status; - struct completion complete; -}; - /** LiquidIO per-interface network private data */ struct lio { /** State of the interface. Rx/Tx happens only in the RUNNING state. */ @@ -178,7 +176,7 @@ struct lio { struct cavium_wq txq_status_wq; /* work queue for rxq oom status */ - struct cavium_wq rxq_status_wq; + struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES]; /* work queue for link status */ struct cavium_wq link_status_wq; @@ -187,6 +185,7 @@ struct lio { struct cavium_wq sync_octeon_time_wq; int netdev_uc_count; + struct cavium_wk stats_wk; }; #define LIO_SIZE (sizeof(struct lio)) @@ -225,7 +224,7 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); -int octnet_get_link_stats(struct net_device *netdev); +void lio_fetch_stats(struct work_struct *work); int lio_wait_for_clean_oq(struct octeon_device *oct); /** @@ -234,16 +233,14 @@ int lio_wait_for_clean_oq(struct octeon_device *oct); */ void liquidio_set_ethtool_ops(struct net_device *netdev); -void lio_if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), - void *buf); - void lio_delete_glists(struct lio *lio); int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs); int liquidio_get_speed(struct lio *lio); int liquidio_set_speed(struct lio *lio, int speed); +int liquidio_get_fec(struct lio *lio); +int liquidio_set_fec(struct lio *lio, int on_off); /** * \brief Net device change_mtu diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index 150609bd8849..1a706f81bbb0 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -75,8 +75,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, else sc->cmd.cmd2.rptr = sc->dmarptr; - sc->wait_time = 1000; - sc->timeout = jiffies + sc->wait_time; + sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS); return sc; } @@ -92,29 +91,6 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct, ndata->reqtype); } -static void octnet_link_ctrl_callback(struct octeon_device *oct, - u32 status, - void *sc_ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)sc_ptr; - struct octnic_ctrl_pkt *nctrl; - - nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr; - - /* Call the callback function if status is zero (meaning OK) or status - * contains a firmware status code bigger than zero (meaning the - * firmware is reporting an error). - * If no response was expected, status is OK if the command was posted - * successfully. 
- */ - if ((!status || status > FIRMWARE_STATUS_CODE(0)) && nctrl->cb_fn) { - nctrl->status = status; - nctrl->cb_fn(nctrl); - } - - octeon_free_soft_command(oct, sc); -} - static inline struct octeon_soft_command *octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct, struct octnic_ctrl_pkt *nctrl) @@ -127,17 +103,14 @@ static inline struct octeon_soft_command uddsize = (u32)(nctrl->ncmd.s.more * 8); datasize = OCTNET_CMD_SIZE + uddsize; - rdatasize = (nctrl->wait_time) ? 16 : 0; + rdatasize = 16; sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct, datasize, rdatasize, - sizeof(struct octnic_ctrl_pkt)); + octeon_alloc_soft_command(oct, datasize, rdatasize, 0); if (!sc) return NULL; - memcpy(sc->ctxptr, nctrl, sizeof(struct octnic_ctrl_pkt)); - data = (u8 *)sc->virtdptr; memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE); @@ -154,9 +127,8 @@ static inline struct octeon_soft_command octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = octnet_link_ctrl_callback; - sc->callback_arg = sc; - sc->wait_time = nctrl->wait_time; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; return sc; } @@ -199,5 +171,28 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct, } spin_unlock_bh(&oct->cmd_resp_wqlock); + + if (nctrl->ncmd.s.cmdgroup == 0) { + switch (nctrl->ncmd.s.cmd) { + /* caller holds lock, can not sleep */ + case OCTNET_CMD_CHANGE_DEVFLAGS: + case OCTNET_CMD_SET_MULTI_LIST: + case OCTNET_CMD_SET_UC_LIST: + WRITE_ONCE(sc->caller_is_done, true); + return retval; + } + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (retval); + + nctrl->sc_status = sc->sc_status; + retval = nctrl->sc_status; + if (nctrl->cb_fn) + nctrl->cb_fn(nctrl); + + WRITE_ONCE(sc->caller_is_done, true); + return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h index de4130d26a98..87dd6f89ce51 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h @@ -52,20 +52,13 @@ struct octnic_ctrl_pkt { /** Input queue to use to send this command. */ u64 iq_no; - /** Time to wait for Octeon software to respond to this control command. - * If wait_time is 0, OSI assumes no response is expected. - */ - size_t wait_time; - /** The network device that issued the control command. 
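[Aside, not part of the patch] The octnet_send_nic_ctrl_pkt() rework above makes the control path block for the firmware's reply by default, but three commands are exempt because, per the driver's own comments, their callers hold locks and cannot sleep; for those the buffer is released immediately. A small predicate sketch of that dispatch; the helper name is invented and the constants come from the liquidio headers shown earlier:

static bool lio_ctrl_cmd_is_fire_and_forget(const struct octnic_ctrl_pkt *nctrl)
{
	/* issued from contexts that hold locks and cannot sleep */
	if (nctrl->ncmd.s.cmdgroup != 0)
		return false;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
	case OCTNET_CMD_SET_UC_LIST:
		return true;
	default:
		return false;
	}
}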
*/ u64 netpndev; /** Callback function called when the command has been fetched */ octnic_ctrl_pkt_cb_fn_t cb_fn; - u32 status; - u16 *response_code; - struct completion *completion; + u32 sc_status; }; #define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd)) diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 8f746e1348d4..c6f4cbda040f 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -123,6 +123,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, iq->do_auto_flush = 1; iq->db_timeout = (u32)conf->db_timeout; atomic_set(&iq->instr_pending, 0); + iq->pkts_processed = 0; /* Initialize the spinlock for this instruction queue */ spin_lock_init(&iq->lock); @@ -379,7 +380,6 @@ lio_process_iq_request_list(struct octeon_device *oct, u32 inst_count = 0; unsigned int pkts_compl = 0, bytes_compl = 0; struct octeon_soft_command *sc; - struct octeon_instr_irh *irh; unsigned long flags; while (old != iq->octeon_read_index) { @@ -401,40 +401,21 @@ lio_process_iq_request_list(struct octeon_device *oct, case REQTYPE_RESP_NET: case REQTYPE_SOFT_COMMAND: sc = buf; - - if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) - irh = (struct octeon_instr_irh *) - &sc->cmd.cmd3.irh; - else - irh = (struct octeon_instr_irh *) - &sc->cmd.cmd2.irh; - if (irh->rflag) { - /* We're expecting a response from Octeon. - * It's up to lio_process_ordered_list() to - * process sc. Add sc to the ordered soft - * command response list because we expect - * a response from Octeon. - */ - spin_lock_irqsave - (&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock, - flags); - atomic_inc(&oct->response_list - [OCTEON_ORDERED_SC_LIST]. - pending_req_count); - list_add_tail(&sc->node, &oct->response_list - [OCTEON_ORDERED_SC_LIST].head); - spin_unlock_irqrestore - (&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock, - flags); - } else { - if (sc->callback) { - /* This callback must not sleep */ - sc->callback(oct, OCTEON_REQUEST_DONE, - sc->callback_arg); - } - } + /* We're expecting a response from Octeon. + * It's up to lio_process_ordered_list() to + * process sc. Add sc to the ordered soft + * command response list because we expect + * a response from Octeon. 
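[Aside, not part of the patch] Taken with the response-manager hunks that follow, this change gives every soft command one lifecycle: it always joins the ordered list when posted (the old irh->rflag special case is gone, since every command now carries a response buffer), then lio_process_ordered_list() moves it to the done list once a reply or fatal timeout is processed, and commands that outlive LIO_SC_MAX_TMO_MS are parked on the zombie list until octeon_free_sc_zombie_list() reaps them. A sketch of that state machine, with an enum invented purely for illustration:

/* illustrative only: the driver tracks this by which response list the
 * sc is currently linked on, not by an explicit state field
 */
enum lio_sc_lifecycle {
	LIO_SC_POSTED,	/* on OCTEON_ORDERED_SC_LIST, awaiting a reply       */
	LIO_SC_DONE,	/* on OCTEON_DONE_SC_LIST; freed once caller_is_done */
	LIO_SC_ZOMBIE,	/* on OCTEON_ZOMBIE_SC_LIST after a fatal timeout;
			 * reaped later by octeon_free_sc_zombie_list()      */
};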
+ */ + spin_lock_irqsave(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, flags); + atomic_inc(&oct->response_list + [OCTEON_ORDERED_SC_LIST].pending_req_count); + list_add_tail(&sc->node, &oct->response_list + [OCTEON_ORDERED_SC_LIST].head); + spin_unlock_irqrestore(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); break; default: dev_err(&oct->pci_dev->dev, @@ -459,7 +440,7 @@ lio_process_iq_request_list(struct octeon_device *oct, if (atomic_read(&oct->response_list [OCTEON_ORDERED_SC_LIST].pending_req_count)) - queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1)); + queue_work(cwq->wq, &cwq->wk.work.work); return inst_count; } @@ -495,6 +476,7 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, lio_process_iq_request_list(oct, iq, 0); if (inst_processed) { + iq->pkts_processed += inst_processed; atomic_sub(inst_processed, &iq->instr_pending); iq->stats.instr_processed += inst_processed; } @@ -753,8 +735,7 @@ int octeon_send_soft_command(struct octeon_device *oct, len = (u32)ih2->dlengsz; } - if (sc->wait_time) - sc->timeout = jiffies + sc->wait_time; + sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS); return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, len, REQTYPE_SOFT_COMMAND)); @@ -789,11 +770,76 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct) return 0; } +int octeon_free_sc_done_list(struct octeon_device *oct) +{ + struct octeon_response_list *done_sc_list, *zombie_sc_list; + struct octeon_soft_command *sc; + struct list_head *tmp, *tmp2; + spinlock_t *sc_lists_lock; /* lock for response_list */ + + done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST]; + zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST]; + + if (!atomic_read(&done_sc_list->pending_req_count)) + return 0; + + sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock; + + spin_lock_bh(sc_lists_lock); + + list_for_each_safe(tmp, tmp2, &done_sc_list->head) { + sc = list_entry(tmp, struct octeon_soft_command, node); + + if (READ_ONCE(sc->caller_is_done)) { + list_del(&sc->node); + atomic_dec(&done_sc_list->pending_req_count); + + if (*sc->status_word == COMPLETION_WORD_INIT) { + /* timeout; move sc to zombie list */ + list_add_tail(&sc->node, &zombie_sc_list->head); + atomic_inc(&zombie_sc_list->pending_req_count); + } else { + octeon_free_soft_command(oct, sc); + } + } + } + + spin_unlock_bh(sc_lists_lock); + + return 0; +} + +int octeon_free_sc_zombie_list(struct octeon_device *oct) +{ + struct octeon_response_list *zombie_sc_list; + struct octeon_soft_command *sc; + struct list_head *tmp, *tmp2; + spinlock_t *sc_lists_lock; /* lock for response_list */ + + zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST]; + sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock; + + spin_lock_bh(sc_lists_lock); + + list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) { + list_del(tmp); + atomic_dec(&zombie_sc_list->pending_req_count); + sc = list_entry(tmp, struct octeon_soft_command, node); + octeon_free_soft_command(oct, sc); + } + + spin_unlock_bh(sc_lists_lock); + + return 0; +} + int octeon_free_sc_buffer_pool(struct octeon_device *oct) { struct list_head *tmp, *tmp2; struct octeon_soft_command *sc; + octeon_free_sc_zombie_list(oct); + spin_lock_bh(&oct->sc_buf_pool.lock); list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) { @@ -822,6 +868,9 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc = NULL; struct list_head *tmp; + if 
(!rdatasize) + rdatasize = 16; + WARN_ON((offset + datasize + rdatasize + ctxsize) > SOFT_COMMAND_BUFFER_SIZE); diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c index fe5b53700576..ac7747ccf56a 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -69,6 +69,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, u32 status; u64 status64; + octeon_free_sc_done_list(octeon_dev); + ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST]; do { @@ -111,26 +113,88 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, } } } - } else if (force_quit || (sc->timeout && - time_after(jiffies, (unsigned long)sc->timeout))) { - dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n", - __func__, (long)jiffies, (long)sc->timeout); + } else if (unlikely(force_quit) || (sc->expiry_time && + time_after(jiffies, (unsigned long)sc->expiry_time))) { + struct octeon_instr_irh *irh = + (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; + + dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__); + dev_err(&octeon_dev->pci_dev->dev, + "cmd %x/%x/%llx/%llx failed, ", + irh->opcode, irh->subcode, + sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]); + dev_err(&octeon_dev->pci_dev->dev, + "timeout (%ld, %ld)\n", + (long)jiffies, (long)sc->expiry_time); status = OCTEON_REQUEST_TIMEOUT; } if (status != OCTEON_REQUEST_PENDING) { + sc->sc_status = status; + /* we have received a response or we have timed out */ /* remove node from linked list */ list_del(&sc->node); atomic_dec(&octeon_dev->response_list - [OCTEON_ORDERED_SC_LIST]. - pending_req_count); - spin_unlock_bh - (&ordered_sc_list->lock); + [OCTEON_ORDERED_SC_LIST]. + pending_req_count); + + if (!sc->callback) { + atomic_inc(&octeon_dev->response_list + [OCTEON_DONE_SC_LIST]. + pending_req_count); + list_add_tail(&sc->node, + &octeon_dev->response_list + [OCTEON_DONE_SC_LIST].head); + + if (unlikely(READ_ONCE(sc->caller_is_done))) { + /* caller does not wait for response + * from firmware + */ + if (status != OCTEON_REQUEST_DONE) { + struct octeon_instr_irh *irh; + + irh = + (struct octeon_instr_irh *) + &sc->cmd.cmd3.irh; + dev_dbg + (&octeon_dev->pci_dev->dev, + "%s: sc failed: opcode=%x, ", + __func__, irh->opcode); + dev_dbg + (&octeon_dev->pci_dev->dev, + "subcode=%x, ossp[0]=%llx, ", + irh->subcode, + sc->cmd.cmd3.ossp[0]); + dev_dbg + (&octeon_dev->pci_dev->dev, + "ossp[1]=%llx, status=%d\n", + sc->cmd.cmd3.ossp[1], + status); + } + } else { + complete(&sc->complete); + } + + spin_unlock_bh(&ordered_sc_list->lock); + } else { + /* sc with callback function */ + if (status == OCTEON_REQUEST_TIMEOUT) { + atomic_inc(&octeon_dev->response_list + [OCTEON_ZOMBIE_SC_LIST]. + pending_req_count); + list_add_tail(&sc->node, + &octeon_dev->response_list + [OCTEON_ZOMBIE_SC_LIST]. 
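In the reworked flow, a caller that needs the firmware's answer waits on the command's completion, reads sc_status, and then sets caller_is_done so the response handler may recycle the command (or park it on the zombie list if it never completed). A hedged sketch of that hand-off using a stand-in struct, not the driver's real octeon_soft_command:

#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_sc {
	struct completion complete;
	int sc_status;
	int caller_is_done;
};

static int demo_sc_wait(struct demo_sc *sc, unsigned int tmo_ms)
{
	int status = -ETIMEDOUT;

	if (wait_for_completion_timeout(&sc->complete,
					msecs_to_jiffies(tmo_ms)))
		status = READ_ONCE(sc->sc_status);

	/* Signal the response handler that it now owns the command's
	 * lifetime; the caller will not touch it again.
	 */
	WRITE_ONCE(sc->caller_is_done, 1);

	return status;
}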
+ head); + } + + spin_unlock_bh(&ordered_sc_list->lock); - if (sc->callback) sc->callback(octeon_dev, status, sc->callback_arg); + /* sc is freed by caller */ + } request_complete++; diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h index 9169c2815dba..ed4020d26fae 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.h +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h @@ -53,7 +53,9 @@ enum { OCTEON_ORDERED_LIST = 0, OCTEON_UNORDERED_NONBLOCKING_LIST = 1, OCTEON_UNORDERED_BLOCKING_LIST = 2, - OCTEON_ORDERED_SC_LIST = 3 + OCTEON_ORDERED_SC_LIST = 3, + OCTEON_DONE_SC_LIST = 4, + OCTEON_ZOMBIE_SC_LIST = 5 }; /** Response Order values for a Octeon Request. */ diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index bb43ddb7539e..4b3aecf98f2a 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1268,12 +1268,13 @@ static int octeon_mgmt_stop(struct net_device *netdev) return 0; } -static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); union mgmt_port_ring_entry re; unsigned long flags; - int rv = NETDEV_TX_BUSY; + netdev_tx_t rv = NETDEV_TX_BUSY; re.d64 = 0; re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index e2cdfa75673f..75c1c5ed2387 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -67,6 +67,7 @@ config CHELSIO_T3 config CHELSIO_T4 tristate "Chelsio Communications T4/T5/T6 Ethernet support" depends on PCI && (IPV6 || IPV6=n) + depends on THERMAL || !THERMAL select FW_LOADER select MDIO select ZLIB_DEFLATE diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index c34ea385fe4a..1e82b9efe447 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -33,7 +33,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> -#include <linux/moduleparam.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> @@ -3440,8 +3439,7 @@ static void remove_one(struct pci_dev *pdev) free_netdev(adapter->port[i]); iounmap(adapter->regs); - if (adapter->nofail_skb) - kfree_skb(adapter->nofail_skb); + kfree_skb(adapter->nofail_skb); kfree(adapter); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 50cd660732c5..84604aff53ce 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -1302,8 +1302,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter) rcu_read_unlock(); RCU_INIT_POINTER(tdev->l2opt, NULL); call_rcu(&d->rcu_head, clean_l2_data); - if (t->nofail_skb) - kfree_skb(t->nofail_skb); + kfree_skb(t->nofail_skb); kfree(t); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index bea6a059a8f1..78e5d17a1d5f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -12,3 +12,6 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o 
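The cxgb3 cleanups above rely on kfree_skb() being a no-op when passed NULL, so the guarding if statements can simply be dropped. For example:

#include <linux/skbuff.h>

static void drop_cached_skb(struct sk_buff **cached)
{
	/* kfree_skb(NULL) is safe, so no NULL check is required. */
	kfree_skb(*cached);
	*cached = NULL;
}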
sge.o clip_tbl.o cxgb4_ethtool.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o +ifdef CONFIG_THERMAL +cxgb4-objs += cxgb4_thermal.o +endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 36d25883d123..b2d617abcf49 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -315,6 +315,48 @@ struct cudbg_pbt_tables { u32 pbt_data[CUDBG_PBT_DATA_ENTRIES]; }; +enum cudbg_qdesc_qtype { + CUDBG_QTYPE_UNKNOWN = 0, + CUDBG_QTYPE_NIC_TXQ, + CUDBG_QTYPE_NIC_RXQ, + CUDBG_QTYPE_NIC_FLQ, + CUDBG_QTYPE_CTRLQ, + CUDBG_QTYPE_FWEVTQ, + CUDBG_QTYPE_INTRQ, + CUDBG_QTYPE_PTP_TXQ, + CUDBG_QTYPE_OFLD_TXQ, + CUDBG_QTYPE_RDMA_RXQ, + CUDBG_QTYPE_RDMA_FLQ, + CUDBG_QTYPE_RDMA_CIQ, + CUDBG_QTYPE_ISCSI_RXQ, + CUDBG_QTYPE_ISCSI_FLQ, + CUDBG_QTYPE_ISCSIT_RXQ, + CUDBG_QTYPE_ISCSIT_FLQ, + CUDBG_QTYPE_CRYPTO_TXQ, + CUDBG_QTYPE_CRYPTO_RXQ, + CUDBG_QTYPE_CRYPTO_FLQ, + CUDBG_QTYPE_TLS_RXQ, + CUDBG_QTYPE_TLS_FLQ, + CUDBG_QTYPE_MAX, +}; + +#define CUDBG_QDESC_REV 1 + +struct cudbg_qdesc_entry { + u32 data_size; + u32 qtype; + u32 qid; + u32 desc_size; + u32 num_desc; + u8 data[0]; /* Must be last */ +}; + +struct cudbg_qdesc_info { + u32 qdesc_entry_size; + u32 num_queues; + u8 data[0]; /* Must be last */ +}; + #define IREG_NUM_ELEM 4 static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 215fe6260fd7..dec63c15c0ba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -81,7 +81,8 @@ enum cudbg_dbg_entity_type { CUDBG_MBOX_LOG = 66, CUDBG_HMA_INDIRECT = 67, CUDBG_HMA = 68, - CUDBG_MAX_ENTITY = 70, + CUDBG_QDESC = 70, + CUDBG_MAX_ENTITY = 71, }; struct cudbg_init { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index d97e0d7e541a..7c49681407ad 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -19,6 +19,7 @@ #include "t4_regs.h" #include "cxgb4.h" +#include "cxgb4_cudbg.h" #include "cudbg_if.h" #include "cudbg_lib_common.h" #include "cudbg_entity.h" @@ -2890,3 +2891,240 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, } return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } + +void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, + u32 *num, u32 *size) +{ + u32 tot_entries = 0, tot_size = 0; + + /* NIC TXQ, RXQ, FLQ, and CTRLQ */ + tot_entries += MAX_ETH_QSETS * 3; + tot_entries += MAX_CTRL_QUEUES; + + tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; + tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; + tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE; + tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES * + MAX_CTRL_TXQ_DESC_SIZE; + + /* FW_EVTQ and INTRQ */ + tot_entries += INGQ_EXTRAS; + tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; + + /* PTP_TXQ */ + tot_entries += 1; + tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; + + /* ULD TXQ, RXQ, and FLQ */ + tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS; + tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2; + + tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * + MAX_TXQ_DESC_SIZE; + tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES * + 
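The new qdesc dump records are fixed headers followed by a variable amount of descriptor data (the data[0] members above). A generic sketch of sizing such an allocation with struct_size(); the struct here is an illustrative stand-in, not cudbg's:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct var_rec {
	u32 data_size;
	u8 data[];		/* payload follows the fixed header */
};

static struct var_rec *var_rec_alloc(u32 payload_bytes)
{
	struct var_rec *rec;

	/* struct_size() computes sizeof(*rec) + payload_bytes with
	 * overflow checking.
	 */
	rec = kzalloc(struct_size(rec, data, payload_bytes), GFP_KERNEL);
	if (rec)
		rec->data_size = payload_bytes;
	return rec;
}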
MAX_RXQ_DESC_SIZE; + tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS * + MAX_FL_DESC_SIZE; + + /* ULD CIQ */ + tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS; + tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE * + MAX_RXQ_DESC_SIZE; + + tot_size += sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_qdesc_info) + + sizeof(struct cudbg_qdesc_entry) * tot_entries; + + if (num) + *num = tot_entries; + + if (size) + *size = tot_size; +} + +int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + u32 num_queues = 0, tot_entries = 0, size = 0; + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_qdesc_entry *qdesc_entry; + struct cudbg_qdesc_info *qdesc_info; + struct cudbg_ver_hdr *ver_hdr; + struct sge *s = &padap->sge; + u32 i, j, cur_off, tot_len; + u8 *data; + int rc; + + cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size); + size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE); + tot_len = size; + data = kvzalloc(size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + ver_hdr = (struct cudbg_ver_hdr *)data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_QDESC_REV; + ver_hdr->size = sizeof(struct cudbg_qdesc_info); + size -= sizeof(*ver_hdr); + + qdesc_info = (struct cudbg_qdesc_info *)(data + + sizeof(*ver_hdr)); + size -= sizeof(*qdesc_info); + qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data; + +#define QDESC_GET(q, desc, type, label) do { \ + if (size <= 0) { \ + goto label; \ + } \ + if (desc) { \ + cudbg_fill_qdesc_##q(q, type, qdesc_entry); \ + size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \ + num_queues++; \ + qdesc_entry = cudbg_next_qdesc(qdesc_entry); \ + } \ +} while (0) + +#define QDESC_GET_TXQ(q, type, label) do { \ + struct sge_txq *txq = (struct sge_txq *)q; \ + QDESC_GET(txq, txq->desc, type, label); \ +} while (0) + +#define QDESC_GET_RXQ(q, type, label) do { \ + struct sge_rspq *rxq = (struct sge_rspq *)q; \ + QDESC_GET(rxq, rxq->desc, type, label); \ +} while (0) + +#define QDESC_GET_FLQ(q, type, label) do { \ + struct sge_fl *flq = (struct sge_fl *)q; \ + QDESC_GET(flq, flq->desc, type, label); \ +} while (0) + + /* NIC TXQ */ + for (i = 0; i < s->ethqsets; i++) + QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out); + + /* NIC RXQ */ + for (i = 0; i < s->ethqsets; i++) + QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out); + + /* NIC FLQ */ + for (i = 0; i < s->ethqsets; i++) + QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out); + + /* NIC CTRLQ */ + for (i = 0; i < padap->params.nports; i++) + QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out); + + /* FW_EVTQ */ + QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out); + + /* INTRQ */ + QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out); + + /* PTP_TXQ */ + QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out); + + /* ULD Queues */ + mutex_lock(&uld_mutex); + + if (s->uld_txq_info) { + struct sge_uld_txq_info *utxq; + + /* ULD TXQ */ + for (j = 0; j < CXGB4_TX_MAX; j++) { + if (!s->uld_txq_info[j]) + continue; + + utxq = s->uld_txq_info[j]; + for (i = 0; i < utxq->ntxq; i++) + QDESC_GET_TXQ(&utxq->uldtxq[i].q, + cudbg_uld_txq_to_qtype(j), + out_unlock); + } + } + + if (s->uld_rxq_info) { + struct sge_uld_rxq_info *urxq; + u32 base; + + /* ULD RXQ */ + for (j = 0; j < CXGB4_ULD_MAX; j++) { + if (!s->uld_rxq_info[j]) + continue; + + urxq = s->uld_rxq_info[j]; + for (i = 0; i < urxq->nrxq; i++) + 
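cudbg_collect_qdesc() above stages a buffer that can run to many megabytes and clamps it with min_t(), so it uses kvzalloc()/kvfree(), which fall back to vmalloc when contiguous pages are unavailable. A minimal illustration of that allocation choice:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

static void *dump_buf_alloc(size_t want, size_t cap)
{
	/* Clamp to an upper bound, then let kvzalloc pick kmalloc or
	 * vmalloc depending on the size.
	 */
	return kvzalloc(min_t(size_t, want, cap), GFP_KERNEL);
}

static void dump_buf_free(void *buf)
{
	kvfree(buf);		/* handles both allocation paths */
}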
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq, + cudbg_uld_rxq_to_qtype(j), + out_unlock); + } + + /* ULD FLQ */ + for (j = 0; j < CXGB4_ULD_MAX; j++) { + if (!s->uld_rxq_info[j]) + continue; + + urxq = s->uld_rxq_info[j]; + for (i = 0; i < urxq->nrxq; i++) + QDESC_GET_FLQ(&urxq->uldrxq[i].fl, + cudbg_uld_flq_to_qtype(j), + out_unlock); + } + + /* ULD CIQ */ + for (j = 0; j < CXGB4_ULD_MAX; j++) { + if (!s->uld_rxq_info[j]) + continue; + + urxq = s->uld_rxq_info[j]; + base = urxq->nrxq; + for (i = 0; i < urxq->nciq; i++) + QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq, + cudbg_uld_ciq_to_qtype(j), + out_unlock); + } + } + +out_unlock: + mutex_unlock(&uld_mutex); + +out: + qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry); + qdesc_info->num_queues = num_queues; + cur_off = 0; + while (tot_len) { + u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE); + + rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size, + &temp_buff); + if (rc) { + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out_free; + } + + memcpy(temp_buff.data, data + cur_off, chunk_size); + tot_len -= chunk_size; + cur_off += chunk_size; + rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff, + dbg_buff); + if (rc) { + cudbg_put_buff(pdbg_init, &temp_buff); + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out_free; + } + } + +out_free: + if (data) + kvfree(data); + +#undef QDESC_GET_FLQ +#undef QDESC_GET_RXQ +#undef QDESC_GET_TXQ +#undef QDESC_GET + + return rc; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index eebefe7cd18e..f047a01a3e5b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -171,6 +171,9 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, @@ -182,4 +185,107 @@ int cudbg_fill_meminfo(struct adapter *padap, struct cudbg_meminfo *meminfo_buff); void cudbg_fill_le_tcam_info(struct adapter *padap, struct cudbg_tcam *tcam_region); +void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, + u32 *num, u32 *size); + +static inline u32 cudbg_uld_txq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_TX_OFLD: + return CUDBG_QTYPE_OFLD_TXQ; + case CXGB4_TX_CRYPTO: + return CUDBG_QTYPE_CRYPTO_TXQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline u32 cudbg_uld_rxq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_ULD_RDMA: + return CUDBG_QTYPE_RDMA_RXQ; + case CXGB4_ULD_ISCSI: + return CUDBG_QTYPE_ISCSI_RXQ; + case CXGB4_ULD_ISCSIT: + return CUDBG_QTYPE_ISCSIT_RXQ; + case CXGB4_ULD_CRYPTO: + return CUDBG_QTYPE_CRYPTO_RXQ; + case CXGB4_ULD_TLS: + return CUDBG_QTYPE_TLS_RXQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline u32 cudbg_uld_flq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_ULD_RDMA: + return CUDBG_QTYPE_RDMA_FLQ; + case CXGB4_ULD_ISCSI: + return CUDBG_QTYPE_ISCSI_FLQ; + case CXGB4_ULD_ISCSIT: + return CUDBG_QTYPE_ISCSIT_FLQ; + case CXGB4_ULD_CRYPTO: + return CUDBG_QTYPE_CRYPTO_FLQ; + case CXGB4_ULD_TLS: + return CUDBG_QTYPE_TLS_FLQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline u32 cudbg_uld_ciq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_ULD_RDMA: + 
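The tail of cudbg_collect_qdesc() streams the staged buffer out in bounded chunks. A stand-alone sketch of that chunking loop; the constant and names are illustrative, not the driver's:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_CHUNK_SIZE 4096	/* stand-in for the driver's chunk size */

static void copy_in_chunks(u8 *dst, const u8 *src, u32 total)
{
	u32 off = 0;

	while (total) {
		u32 chunk = min_t(u32, total, DEMO_CHUNK_SIZE);

		memcpy(dst + off, src + off, chunk);
		off += chunk;
		total -= chunk;
	}
}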
return CUDBG_QTYPE_RDMA_CIQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline void cudbg_fill_qdesc_txq(const struct sge_txq *txq, + enum cudbg_qdesc_qtype type, + struct cudbg_qdesc_entry *entry) +{ + entry->qtype = type; + entry->qid = txq->cntxt_id; + entry->desc_size = sizeof(struct tx_desc); + entry->num_desc = txq->size; + entry->data_size = txq->size * sizeof(struct tx_desc); + memcpy(entry->data, txq->desc, entry->data_size); +} + +static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq, + enum cudbg_qdesc_qtype type, + struct cudbg_qdesc_entry *entry) +{ + entry->qtype = type; + entry->qid = rxq->cntxt_id; + entry->desc_size = rxq->iqe_len; + entry->num_desc = rxq->size; + entry->data_size = rxq->size * rxq->iqe_len; + memcpy(entry->data, rxq->desc, entry->data_size); +} + +static inline void cudbg_fill_qdesc_flq(const struct sge_fl *flq, + enum cudbg_qdesc_qtype type, + struct cudbg_qdesc_entry *entry) +{ + entry->qtype = type; + entry->qid = flq->cntxt_id; + entry->desc_size = sizeof(__be64); + entry->num_desc = flq->size; + entry->data_size = flq->size * sizeof(__be64); + memcpy(entry->data, flq->desc, entry->data_size); +} + +static inline +struct cudbg_qdesc_entry *cudbg_next_qdesc(struct cudbg_qdesc_entry *e) +{ + return (struct cudbg_qdesc_entry *) + ((u8 *)e + sizeof(*e) + e->data_size); +} #endif /* __CUDBG_LIB_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 76d16747f513..b16f4b3ef4c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -52,6 +52,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/ptp_classify.h> #include <linux/crash_dump.h> +#include <linux/thermal.h> #include <asm/io.h> #include "t4_chip_type.h" #include "cxgb4_uld.h" @@ -533,6 +534,13 @@ enum { }; enum { + MAX_TXQ_DESC_SIZE = 64, + MAX_RXQ_DESC_SIZE = 128, + MAX_FL_DESC_SIZE = 8, + MAX_CTRL_TXQ_DESC_SIZE = 64, +}; + +enum { INGQ_EXTRAS = 2, /* firmware event queue and */ /* forwarded interrupts */ MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS, @@ -685,6 +693,7 @@ struct sge_eth_stats { /* Ethernet queue statistics */ unsigned long rx_cso; /* # of Rx checksum offloads */ unsigned long vlan_ex; /* # of Rx VLAN extractions */ unsigned long rx_drops; /* # of packets dropped due to no mem */ + unsigned long bad_rx_pkts; /* # of packets with err_vec!=0 */ }; struct sge_eth_rxq { /* SW Ethernet Rx queue */ @@ -882,6 +891,14 @@ struct mps_encap_entry { atomic_t refcnt; }; +#if IS_ENABLED(CONFIG_THERMAL) +struct ch_thermal { + struct thermal_zone_device *tzdev; + int trip_temp; + int trip_type; +}; +#endif + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -1000,6 +1017,9 @@ struct adapter { /* Dump buffer for collecting logs in kdump kernel */ struct vmcoredd_data vmcoredd; +#if IS_ENABLED(CONFIG_THERMAL) + struct ch_thermal ch_thermal; +#endif }; /* Support for "sched-class" command to allow a TX Scheduling Class to be @@ -1854,4 +1874,8 @@ void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, u16 vlan); int cxgb4_dcb_enabled(const struct net_device *dev); + +int cxgb4_thermal_init(struct adapter *adap); +int cxgb4_thermal_remove(struct adapter *adap); + #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 5f01c0a7fd98..972f0a124714 100644 --- 
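cudbg_next_qdesc() above advances through back-to-back variable-length entries by adding the header size plus each entry's own data_size. The same walk, reduced to a generic record type for clarity:

#include <linux/types.h>

struct packed_rec {
	u32 data_size;
	u8 data[];
};

/* Step to the record that immediately follows this one in the buffer. */
static struct packed_rec *packed_rec_next(struct packed_rec *rec)
{
	return (struct packed_rec *)((u8 *)rec + sizeof(*rec) +
				     rec->data_size);
}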
a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -30,6 +30,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = { static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_MBOX_LOG, cudbg_collect_mbox_log }, + { CUDBG_QDESC, cudbg_collect_qdesc }, { CUDBG_DEV_LOG, cudbg_collect_fw_devlog }, { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, { CUDBG_CIM_LA, cudbg_collect_cim_la }, @@ -311,6 +312,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } len = cudbg_mbytes_to_bytes(len); break; + case CUDBG_QDESC: + cudbg_fill_qdesc_num_and_size(adap, NULL, &len); + break; default: break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index b34f0f077a31..9bd5f755a0e0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -114,6 +114,24 @@ void cxgb4_dcb_reset(struct net_device *dev) cxgb4_dcb_state_init(dev); } +/* update the dcb port support, if version is IEEE then set it to + * FW_PORT_DCB_VER_IEEE and if DCB_CAP_DCBX_VER_CEE is already set then + * clear that. and if it is set to CEE then set dcb supported to + * DCB_CAP_DCBX_VER_CEE & if DCB_CAP_DCBX_VER_IEEE is set, clear it + */ +static inline void cxgb4_dcb_update_support(struct port_dcb_info *dcb) +{ + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + if (dcb->supported & DCB_CAP_DCBX_VER_CEE) + dcb->supported &= ~DCB_CAP_DCBX_VER_CEE; + dcb->supported |= DCB_CAP_DCBX_VER_IEEE; + } else if (dcb->dcb_version == FW_PORT_DCB_VER_CEE1D01) { + if (dcb->supported & DCB_CAP_DCBX_VER_IEEE) + dcb->supported &= ~DCB_CAP_DCBX_VER_IEEE; + dcb->supported |= DCB_CAP_DCBX_VER_CEE; + } +} + /* Finite State machine for Data Center Bridging. */ void cxgb4_dcb_state_fsm(struct net_device *dev, @@ -165,6 +183,15 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, } case CXGB4_DCB_STATE_FW_INCOMPLETE: { + if (transition_to != CXGB4_DCB_INPUT_FW_DISABLED) { + /* during this CXGB4_DCB_STATE_FW_INCOMPLETE state, + * check if the dcb version is changed (there can be + * mismatch in default config & the negotiated switch + * configuration at FW, so update the dcb support + * accordingly. + */ + cxgb4_dcb_update_support(dcb); + } switch (transition_to) { case CXGB4_DCB_INPUT_FW_ENABLED: { /* we're alreaady in firmware DCB mode */ @@ -273,8 +300,8 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, enum cxgb4_dcb_state_input input = ((pcmd->u.dcb.control.all_syncd_pkd & FW_PORT_CMD_ALL_SYNCD_F) - ? CXGB4_DCB_STATE_FW_ALLSYNCED - : CXGB4_DCB_STATE_FW_INCOMPLETE); + ? CXGB4_DCB_INPUT_FW_ALLSYNCED + : CXGB4_DCB_INPUT_FW_INCOMPLETE); if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) { dcb_running_version = FW_PORT_CMD_DCB_VERSION_G( diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index 02040b99c78a..484ee8290090 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -67,7 +67,7 @@ do { \ if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \ cxgb4_dcb_state_fsm((__dev), \ - CXGB4_DCB_STATE_FW_ALLSYNCED); \ + CXGB4_DCB_INPUT_FW_ALLSYNCED); \ } while (0) /* States we can be in for a port's Data Center Bridging. 
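cxgb4_dcb_update_support() above keeps the advertised DCBX version bits mutually exclusive when the firmware-negotiated version changes. A compact version of that rule, using the standard dcbnl capability flags and an illustrative helper name:

#include <linux/dcbnl.h>
#include <linux/types.h>

/* Return the supported-capabilities mask with exactly one DCBX version
 * bit set, matching whichever version was negotiated.
 */
static u8 dcbx_update_version(u8 supported, bool ieee)
{
	if (ieee) {
		supported &= ~DCB_CAP_DCBX_VER_CEE;
		supported |= DCB_CAP_DCBX_VER_IEEE;
	} else {
		supported &= ~DCB_CAP_DCBX_VER_IEEE;
		supported |= DCB_CAP_DCBX_VER_CEE;
	}
	return supported;
}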
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 0f72f9c4ec74..cab492ec8f59 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2784,6 +2784,7 @@ do { \ RL("LROmerged:", stats.lro_merged); RL("LROpackets:", stats.lro_pkts); RL("RxDrops:", stats.rx_drops); + RL("RxBadPkts:", stats.bad_rx_pkts); TL("TSO:", tso); TL("TxCSO:", tx_cso); TL("VLANins:", vlan_ins); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 961e3087d1d3..05a46926016a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -62,7 +62,6 @@ #include <net/netevent.h> #include <net/addrconf.h> #include <net/bonding.h> -#include <net/addrconf.h> #include <linux/uaccess.h> #include <linux/crash_dump.h> #include <net/udp_tunnel.h> @@ -2749,6 +2748,27 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf, return -EINVAL; } + if (max_tx_rate == 0) { + /* unbind VF to to any Traffic Class */ + fw_pfvf = + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH)); + fw_class = 0xffffffff; + ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, + &fw_pfvf, &fw_class); + if (ret) { + dev_err(adap->pdev_dev, + "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n", + ret, adap->pf, vf); + return -EINVAL; + } + dev_info(adap->pdev_dev, + "PF %d VF %d is unbound from TX Rate Limiting\n", + adap->pf, vf); + adap->vfinfo[vf].tx_rate = 0; + return 0; + } + ret = t4_get_link_params(pi, &link_ok, &speed, &mtu); if (ret != FW_SUCCESS) { dev_err(adap->pdev_dev, @@ -2798,8 +2818,8 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf, &fw_class); if (ret) { dev_err(adap->pdev_dev, - "Err %d in binding VF %d to Traffic Class %d\n", - ret, vf, class_id); + "Err %d in binding PF %d VF %d to Traffic Class %d\n", + ret, adap->pf, vf, class_id); return -EINVAL; } dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", @@ -4747,7 +4767,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); - pci_cleanup_aer_uncorrect_error_status(pdev); if (t4_wait_dev_ready(adap->regs) < 0) return PCI_ERS_RESULT_DISCONNECT; @@ -5844,6 +5863,10 @@ fw_attach_fail: if (!is_t4(adapter->params.chip)) cxgb4_ptp_init(adapter); + if (IS_ENABLED(CONFIG_THERMAL) && + !is_t4(adapter->params.chip) && (adapter->flags & FW_OK)) + cxgb4_thermal_init(adapter); + print_adapter_info(adapter); return 0; @@ -5909,6 +5932,8 @@ static void remove_one(struct pci_dev *pdev) if (!is_t4(adapter->params.chip)) cxgb4_ptp_stop(adapter); + if (IS_ENABLED(CONFIG_THERMAL)) + cxgb4_thermal_remove(adapter); /* If we allocated filters, free up state associated with any * valid filters ... diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c new file mode 100644 index 000000000000..28052e7504e5 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
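The probe and remove hooks above guard the thermal calls with IS_ENABLED(CONFIG_THERMAL), which is true for both =y and =m and lets the compiler discard the dead branch when the option is off; the "depends on THERMAL || !THERMAL" Kconfig line keeps a built-in cxgb4 from linking against a modular thermal core. A trivial, self-contained illustration of the idiom:

#include <linux/kconfig.h>
#include <linux/printk.h>

static void report_thermal_support(void)
{
	/* The condition is resolved at compile time; the untaken arm and
	 * any calls inside it are dropped entirely.
	 */
	if (IS_ENABLED(CONFIG_THERMAL))
		pr_info("thermal framework available\n");
	else
		pr_info("thermal framework not built\n");
}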
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Written by: Ganesh Goudar (ganeshgr@chelsio.com) + */ + +#include "cxgb4.h" + +#define CXGB4_NUM_TRIPS 1 + +static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev, + int *temp) +{ + struct adapter *adap = tzdev->devdata; + u32 param, val; + int ret; + + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | + FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP)); + + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + ¶m, &val); + if (ret < 0 || val == 0) + return -1; + + *temp = val * 1000; + return 0; +} + +static int cxgb4_thermal_get_trip_type(struct thermal_zone_device *tzdev, + int trip, enum thermal_trip_type *type) +{ + struct adapter *adap = tzdev->devdata; + + if (!adap->ch_thermal.trip_temp) + return -EINVAL; + + *type = adap->ch_thermal.trip_type; + return 0; +} + +static int cxgb4_thermal_get_trip_temp(struct thermal_zone_device *tzdev, + int trip, int *temp) +{ + struct adapter *adap = tzdev->devdata; + + if (!adap->ch_thermal.trip_temp) + return -EINVAL; + + *temp = adap->ch_thermal.trip_temp; + return 0; +} + +static struct thermal_zone_device_ops cxgb4_thermal_ops = { + .get_temp = cxgb4_thermal_get_temp, + .get_trip_type = cxgb4_thermal_get_trip_type, + .get_trip_temp = cxgb4_thermal_get_trip_temp, +}; + +int cxgb4_thermal_init(struct adapter *adap) +{ + struct ch_thermal *ch_thermal = &adap->ch_thermal; + int num_trip = CXGB4_NUM_TRIPS; + u32 param, val; + int ret; + + /* on older firmwares we may not get the trip temperature, + * set the num of trips to 0. 
+ */ + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | + FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_MAXTMPTHRESH)); + + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + ¶m, &val); + if (ret < 0) { + num_trip = 0; /* could not get trip temperature */ + } else { + ch_thermal->trip_temp = val * 1000; + ch_thermal->trip_type = THERMAL_TRIP_CRITICAL; + } + + ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip, + 0, adap, + &cxgb4_thermal_ops, + NULL, 0, 0); + if (IS_ERR(ch_thermal->tzdev)) { + ret = PTR_ERR(ch_thermal->tzdev); + dev_err(adap->pdev_dev, "Failed to register thermal zone\n"); + ch_thermal->tzdev = NULL; + return ret; + } + return 0; +} + +int cxgb4_thermal_remove(struct adapter *adap) +{ + if (adap->ch_thermal.tzdev) + thermal_zone_device_unregister(adap->ch_thermal.tzdev); + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 4bc211093c98..9a6065a3fa46 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -520,10 +520,20 @@ setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type, txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL); if (!txq_info) return -ENOMEM; + if (uld_type == CXGB4_ULD_CRYPTO) { + i = min_t(int, adap->vres.ncrypto_fc, + num_online_cpus()); + txq_info->ntxq = rounddown(i, adap->params.nports); + if (txq_info->ntxq <= 0) { + dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n"); + kfree(txq_info); + return -EINVAL; + } - i = min_t(int, uld_info->ntxq, num_online_cpus()); - txq_info->ntxq = roundup(i, adap->params.nports); - + } else { + i = min_t(int, uld_info->ntxq, num_online_cpus()); + txq_info->ntxq = roundup(i, adap->params.nports); + } txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq), GFP_KERNEL); if (!txq_info->uldtxq) { @@ -546,11 +556,14 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type, struct cxgb4_lld_info *lli) { struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int tx_uld_type = TX_ULD(uld_type); + struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type]; lli->rxq_ids = rxq_info->rspq_id; lli->nrxq = rxq_info->nrxq; lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq; lli->nciq = rxq_info->nciq; + lli->ntxq = txq_info->ntxq; } int t4_uld_mem_alloc(struct adapter *adap) @@ -634,7 +647,6 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) lld->ports = adap->port; lld->vr = &adap->vres; lld->mtus = adap->params.mtus; - lld->ntxq = adap->sge.ofldqsets; lld->nchan = adap->params.nports; lld->nports = adap->params.nports; lld->wr_cred = adap->params.ofldq_wr_cred; @@ -702,15 +714,14 @@ static void uld_attach(struct adapter *adap, unsigned int uld) * about any presently available devices that support its type. Returns * %-EBUSY if a ULD of the same type is already registered. 
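Registration in cxgb4_thermal_init() returns an ERR_PTR() on failure, so it is checked with IS_ERR()/PTR_ERR() rather than against NULL. A pared-down sketch of that registration pattern; the zone name and trip count are placeholders:

#include <linux/err.h>
#include <linux/thermal.h>

static int demo_thermal_register(void *drvdata,
				 struct thermal_zone_device_ops *ops,
				 struct thermal_zone_device **tzdev)
{
	*tzdev = thermal_zone_device_register("demo", 0, 0, drvdata,
					      ops, NULL, 0, 0);
	if (IS_ERR(*tzdev)) {
		int ret = PTR_ERR(*tzdev);

		*tzdev = NULL;	/* make the remove path a safe no-op */
		return ret;
	}
	return 0;
}

static void demo_thermal_unregister(struct thermal_zone_device *tzdev)
{
	if (tzdev)
		thermal_zone_device_unregister(tzdev);
}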
*/ -int cxgb4_register_uld(enum cxgb4_uld type, - const struct cxgb4_uld_info *p) +void cxgb4_register_uld(enum cxgb4_uld type, + const struct cxgb4_uld_info *p) { int ret = 0; - unsigned int adap_idx = 0; struct adapter *adap; if (type >= CXGB4_ULD_MAX) - return -EINVAL; + return; mutex_lock(&uld_mutex); list_for_each_entry(adap, &adapter_list, list_node) { @@ -733,52 +744,29 @@ int cxgb4_register_uld(enum cxgb4_uld type, } if (adap->flags & FULL_INIT_DONE) enable_rx_uld(adap, type); - if (adap->uld[type].add) { - ret = -EBUSY; + if (adap->uld[type].add) goto free_irq; - } ret = setup_sge_txq_uld(adap, type, p); if (ret) goto free_irq; adap->uld[type] = *p; uld_attach(adap, type); - adap_idx++; - } - mutex_unlock(&uld_mutex); - return 0; - + continue; free_irq: - if (adap->flags & FULL_INIT_DONE) - quiesce_rx_uld(adap, type); - if (adap->flags & USING_MSIX) - free_msix_queue_irqs_uld(adap, type); -free_rxq: - free_sge_queues_uld(adap, type); -free_queues: - free_queues_uld(adap, type); -out: - - list_for_each_entry(adap, &adapter_list, list_node) { - if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) || - (type != CXGB4_ULD_CRYPTO && !is_offload(adap))) - continue; - if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) - continue; - if (!adap_idx) - break; - adap->uld[type].handle = NULL; - adap->uld[type].add = NULL; - release_sge_txq_uld(adap, type); if (adap->flags & FULL_INIT_DONE) quiesce_rx_uld(adap, type); if (adap->flags & USING_MSIX) free_msix_queue_irqs_uld(adap, type); +free_rxq: free_sge_queues_uld(adap, type); +free_queues: free_queues_uld(adap, type); - adap_idx--; +out: + dev_warn(adap->pdev_dev, + "ULD registration failed for uld type %d\n", type); } mutex_unlock(&uld_mutex); - return ret; + return; } EXPORT_SYMBOL(cxgb4_register_uld); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index de9ad311dacd..5fa9a2d5fc4b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -384,7 +384,7 @@ struct cxgb4_uld_info { int (*tx_handler)(struct sk_buff *skb, struct net_device *dev); }; -int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); +void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); int cxgb4_unregister_uld(enum cxgb4_uld type); int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 301c4df8a566..99022c0898b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -433,10 +433,12 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, else lport = netdev2pinfo(physdev)->lport; - if (is_vlan_dev(neigh->dev)) + if (is_vlan_dev(neigh->dev)) { vlan = vlan_dev_vlan_id(neigh->dev); - else + vlan |= vlan_dev_get_egress_qos_mask(neigh->dev, priority); + } else { vlan = VLAN_NONE; + } write_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 7fc656680299..52edb688942b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -38,7 +38,6 @@ #include "cxgb4.h" #include "sched.h" -/* Spinlock must be held by caller */ static int t4_sched_class_fw_cmd(struct port_info *pi, struct ch_sched_params *p, 
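The l2t change above also folds the egress-mapped 802.1p priority into the VLAN TCI when the neighbour sits on a VLAN device. A small, self-contained sketch of that composition; the helper name is illustrative:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Build a TCI from the VLAN ID plus the priority bits produced by the
 * VLAN device's egress QoS map for the given skb priority.
 */
static u16 demo_build_tci(struct net_device *dev, u32 skb_priority)
{
	u16 tci;

	if (!is_vlan_dev(dev))
		return 0;	/* a real driver would use its VLAN_NONE */

	tci = vlan_dev_vlan_id(dev);
	tci |= vlan_dev_get_egress_qos_mask(dev, skb_priority);
	return tci;
}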
enum sched_fw_ops op) @@ -67,7 +66,6 @@ static int t4_sched_class_fw_cmd(struct port_info *pi, return err; } -/* Spinlock must be held by caller */ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg, enum sched_bind_type type, bool bind) { @@ -163,7 +161,6 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) if (e && index >= 0) { int i = 0; - spin_lock(&e->lock); list_for_each_entry(qe, &e->queue_list, list) { if (i == index) break; @@ -171,10 +168,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) } err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, false); - if (err) { - spin_unlock(&e->lock); - goto out; - } + if (err) + return err; list_del(&qe->list); kvfree(qe); @@ -182,9 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) e->state = SCHED_STATE_UNUSED; memset(&e->info, 0, sizeof(e->info)); } - spin_unlock(&e->lock); } -out: return err; } @@ -210,10 +203,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) /* Unbind queue from any existing class */ err = t4_sched_queue_unbind(pi, p); - if (err) { - kvfree(qe); - goto out; - } + if (err) + goto out_err; /* Bind queue to specified class */ memset(qe, 0, sizeof(*qe)); @@ -221,18 +212,16 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) memcpy(&qe->param, p, sizeof(qe->param)); e = &s->tab[qe->param.class]; - spin_lock(&e->lock); err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true); - if (err) { - kvfree(qe); - spin_unlock(&e->lock); - goto out; - } + if (err) + goto out_err; list_add_tail(&qe->list, &e->queue_list); atomic_inc(&e->refcnt); - spin_unlock(&e->lock); -out: + return err; + +out_err: + kvfree(qe); return err; } @@ -296,8 +285,6 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg, enum sched_bind_type type) { struct port_info *pi = netdev2pinfo(dev); - struct sched_table *s; - int err = 0; u8 class_id; if (!can_sched(dev)) @@ -323,12 +310,8 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg, if (class_id == SCHED_CLS_NONE) return -ENOTSUPP; - s = pi->sched_tbl; - write_lock(&s->rw_lock); - err = t4_sched_class_bind_unbind_op(pi, arg, type, true); - write_unlock(&s->rw_lock); + return t4_sched_class_bind_unbind_op(pi, arg, type, true); - return err; } /** @@ -343,8 +326,6 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, enum sched_bind_type type) { struct port_info *pi = netdev2pinfo(dev); - struct sched_table *s; - int err = 0; u8 class_id; if (!can_sched(dev)) @@ -367,12 +348,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, if (!valid_class_id(dev, class_id)) return -EINVAL; - s = pi->sched_tbl; - write_lock(&s->rw_lock); - err = t4_sched_class_bind_unbind_op(pi, arg, type, false); - write_unlock(&s->rw_lock); - - return err; + return t4_sched_class_bind_unbind_op(pi, arg, type, false); } /* If @p is NULL, fetch any available unused class */ @@ -425,7 +401,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, static struct sched_class *t4_sched_class_alloc(struct port_info *pi, struct ch_sched_params *p) { - struct sched_table *s = pi->sched_tbl; struct sched_class *e; u8 class_id; int err; @@ -441,7 +416,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, if (class_id != SCHED_CLS_NONE) return NULL; - write_lock(&s->rw_lock); /* See if there's an exisiting class with same * requested sched params */ @@ -452,27 
+426,19 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, /* Fetch any available unused class */ e = t4_sched_class_lookup(pi, NULL); if (!e) - goto out; + return NULL; memcpy(&np, p, sizeof(np)); np.u.params.class = e->idx; - - spin_lock(&e->lock); /* New class */ err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD); - if (err) { - spin_unlock(&e->lock); - e = NULL; - goto out; - } + if (err) + return NULL; memcpy(&e->info, &np, sizeof(e->info)); atomic_set(&e->refcnt, 0); e->state = SCHED_STATE_ACTIVE; - spin_unlock(&e->lock); } -out: - write_unlock(&s->rw_lock); return e; } @@ -517,14 +483,12 @@ struct sched_table *t4_init_sched(unsigned int sched_size) return NULL; s->sched_size = sched_size; - rwlock_init(&s->rw_lock); for (i = 0; i < s->sched_size; i++) { memset(&s->tab[i], 0, sizeof(struct sched_class)); s->tab[i].idx = i; s->tab[i].state = SCHED_STATE_UNUSED; INIT_LIST_HEAD(&s->tab[i].queue_list); - spin_lock_init(&s->tab[i].lock); atomic_set(&s->tab[i].refcnt, 0); } return s; @@ -545,11 +509,9 @@ void t4_cleanup_sched(struct adapter *adap) for (i = 0; i < s->sched_size; i++) { struct sched_class *e; - write_lock(&s->rw_lock); e = &s->tab[i]; if (e->state == SCHED_STATE_ACTIVE) t4_sched_class_free(pi, e); - write_unlock(&s->rw_lock); } kvfree(s); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index 3a49e00a38a1..168fb4ce3759 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -69,13 +69,11 @@ struct sched_class { u8 idx; struct ch_sched_params info; struct list_head queue_list; - spinlock_t lock; /* Per class lock */ atomic_t refcnt; }; struct sched_table { /* per port scheduling table */ u8 sched_size; - rwlock_t rw_lock; /* Table lock */ struct sched_class tab[0]; }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 6807bc3a44fb..b90188401d4a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2830,6 +2830,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, csum_ok = pkt->csum_calc && !err_vec && (q->netdev->features & NETIF_F_RXCSUM); + + if (err_vec) + rxq->stats.bad_rx_pkts++; + if (((pkt->l2info & htonl(RXF_TCP_F)) || tnl_hdr_len) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 5fe5d16dee72..cb523949c812 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3889,7 +3889,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) c.param[0].mnem = cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE)); - c.param[0].val = (__force __be32)op; + c.param[0].val = cpu_to_be32(op); return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); } @@ -4204,6 +4204,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, */ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) { + unsigned int fw_caps = adap->params.fw_caps_support; struct fw_port_cmd c; memset(&c, 0, sizeof(c)); @@ -4211,9 +4212,14 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) FW_CMD_REQUEST_F | FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); c.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | + 
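One of the t4_hw.c fixes above replaces a (__force __be32) cast with a real cpu_to_be32() conversion, so the value is actually byte-swapped on little-endian hosts and sparse can verify the annotation. The general pattern, shown on a stand-in mailbox field:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_mbox_param {
	__be32 val;		/* firmware expects big-endian */
};

static void demo_set_param(struct demo_mbox_param *p, u32 host_val)
{
	/* Convert explicitly instead of casting the sparse warning away. */
	p->val = cpu_to_be32(host_val);
}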
cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_L1_CFG + : FW_PORT_ACTION_L1_CFG32) | FW_LEN16(c)); - c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG); + if (fw_caps == FW_CAPS16) + c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG); + else + c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -10209,7 +10215,9 @@ int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, FW_ACL_VLAN_CMD_VFN_V(vf)); vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd)); /* Drop all packets that donot match vlan id */ - vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F; + vlan_cmd.dropnovlan_fm = (enable + ? (FW_ACL_VLAN_CMD_DROPNOVLAN_F | + FW_ACL_VLAN_CMD_FM_F) : 0); if (enable != 0) { vlan_cmd.nvlan = 1; vlan_cmd.vlanid[0] = cpu_to_be16(vlan); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 5dc6c4154af8..57584ab32043 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1332,6 +1332,7 @@ enum fw_params_param_dev_phyfw { enum fw_params_param_dev_diag { FW_PARAM_DEV_DIAG_TMP = 0x00, FW_PARAM_DEV_DIAG_VDD = 0x01, + FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02, }; enum fw_params_param_dev_fwcache { @@ -2464,6 +2465,7 @@ struct fw_acl_vlan_cmd { #define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7 #define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S) +#define FW_ACL_VLAN_CMD_DROPNOVLAN_F FW_ACL_VLAN_CMD_DROPNOVLAN_V(1U) #define FW_ACL_VLAN_CMD_FM_S 6 #define FW_ACL_VLAN_CMD_FM_M 0x1 diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 1c9ad3630c77..ceec467f590d 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -372,9 +372,8 @@ static int gmac_setup_phy(struct net_device *netdev) return -ENODEV; netdev->phydev = phy; - phy->supported &= PHY_GBIT_FEATURES; - phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; - phy->advertising = phy->supported; + phy_set_max_speed(phy, SPEED_1000); + phy_support_asym_pause(phy); /* set PHY interface type */ switch (phy->interface) { diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 50222b7b81f3..0a82fcf16d35 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1722,8 +1722,7 @@ out: static int dm9000_drv_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct board_info *db; if (ndev) { @@ -1745,8 +1744,7 @@ dm9000_drv_suspend(struct device *dev) static int dm9000_drv_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct board_info *db = netdev_priv(ndev); if (ndev) { diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 5a847941c46b..79521e27f0d1 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -284,13 +284,11 @@ static int dnet_mii_probe(struct net_device *dev) /* mask with MAC supported features */ if (bp->capabilities & DNET_HAS_GIGABIT) - phydev->supported &= PHY_GBIT_FEATURES; + phy_set_max_speed(phydev, SPEED_1000); else - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); - 
phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; - - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); bp->link = 0; bp->speed = 0; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 58bcee8f0a58..ce041c90adb0 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -185,6 +185,7 @@ static inline void queue_tail_inc(struct be_queue_info *q) struct be_eq_obj { struct be_queue_info q; + char desc[32]; struct be_adapter *adapter; struct napi_struct napi; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 534787291b44..c5ad7a4f4d83 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3488,11 +3488,9 @@ static int be_msix_register(struct be_adapter *adapter) int status, i, vec; for_all_evt_queues(adapter, eqo, i) { - char irq_name[IFNAMSIZ+4]; - - snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i); + sprintf(eqo->desc, "%s-q%d", netdev->name, i); vec = be_msix_vec_get(adapter, eqo); - status = request_irq(vec, be_msix, 0, irq_name, eqo); + status = request_irq(vec, be_msix, 0, eqo->desc, eqo); if (status) goto err_msix; @@ -6148,7 +6146,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) if (status) return PCI_ERS_RESULT_DISCONNECT; - pci_cleanup_aer_uncorrect_error_status(pdev); be_clear_error(adapter, BE_CLEAR_ALL); return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 60da0499ad66..0f3e7f21c6fa 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -721,10 +721,7 @@ static int ethoc_mdio_probe(struct net_device *dev) return err; } - phy->advertising &= ~(ADVERTISED_1000baseT_Full | - ADVERTISED_1000baseT_Half); - phy->supported &= ~(SUPPORTED_1000baseT_Full | - SUPPORTED_1000baseT_Half); + phy_set_max_speed(phy, SPEED_100); return 0; } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index ed6c76d20b45..4d673225ed3e 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -712,8 +712,8 @@ static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan) return skb_checksum_help(skb) == 0; } -static int ftgmac100_hard_start_xmit(struct sk_buff *skb, - struct net_device *netdev) +static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, + struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); struct ftgmac100_txdes *txdes, *first; @@ -1079,8 +1079,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf) /* Indicate that we support PAUSE frames (see comment in * Documentation/networking/phy.txt) */ - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); /* Display what we found */ phy_attached_info(phydev); @@ -1220,22 +1219,11 @@ static int ftgmac100_set_pauseparam(struct net_device *netdev, priv->tx_pause = pause->tx_pause; priv->rx_pause = pause->rx_pause; - if (phydev) { - phydev->advertising &= ~ADVERTISED_Pause; - phydev->advertising &= ~ADVERTISED_Asym_Pause; + if (phydev) + phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); - if (pause->rx_pause) { - phydev->advertising |= ADVERTISED_Pause; - phydev->advertising |= ADVERTISED_Asym_Pause; - } - - if (pause->tx_pause) - phydev->advertising ^= 
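Several drivers in this series (ax88796, gemini, dnet, ethoc, dpaa, ftgmac100) drop the open-coded masking of phydev->supported/advertising in favour of phylib helpers. A representative probe-time snippet built on those helpers, with an illustrative function name:

#include <linux/phy.h>

/* Cap the PHY at the MAC's maximum speed and let phylib manage the
 * Pause/Asym_Pause bits instead of editing the bitmasks by hand.
 */
static void demo_setup_phy(struct phy_device *phydev, bool gigabit_mac)
{
	phy_set_max_speed(phydev, gigabit_mac ? SPEED_1000 : SPEED_100);
	phy_support_asym_pause(phydev);
}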
ADVERTISED_Asym_Pause; - } if (netif_running(netdev)) { - if (phydev && priv->aneg_pause) - phy_start_aneg(phydev); - else + if (!(phydev && priv->aneg_pause)) ftgmac100_config_pause(priv); } diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index a1197d3adbe0..570caeb8ee9e 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -634,8 +634,8 @@ static void ftmac100_tx_complete(struct ftmac100 *priv) ; } -static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, - dma_addr_t map) +static netdev_tx_t ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, + dma_addr_t map) { struct net_device *netdev = priv->netdev; struct ftmac100_txdes *txdes; @@ -1016,7 +1016,8 @@ static int ftmac100_stop(struct net_device *netdev) return 0; } -static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ftmac100 *priv = netdev_priv(netdev); dma_addr_t map; diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index a580a3dcbe59..d3a62bc1f1c6 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -96,5 +96,6 @@ config GIANFAR on the 8540. source "drivers/net/ethernet/freescale/dpaa/Kconfig" +source "drivers/net/ethernet/freescale/dpaa2/Kconfig" endif # NET_VENDOR_FREESCALE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 0914a3ea4405..3b4ff08e3841 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -21,3 +21,5 @@ ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o obj-$(CONFIG_FSL_FMAN) += fman/ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/ + +obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 65a22cd9aef2..6e0f47f2c8a3 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1280,7 +1280,7 @@ static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp, err = bman_release(dpaa_bp->pool, bmb, cnt); /* Should never occur, address anyway to avoid leaking the buffers */ - if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb) + if (WARN_ON(err) && dpaa_bp->free_buf_cb) while (cnt-- > 0) dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); @@ -1704,10 +1704,8 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, skb = build_skb(vaddr, dpaa_bp->size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); - if (unlikely(!skb)) { - WARN_ONCE(1, "Build skb failure on Rx\n"); + if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) goto free_buffer; - } WARN_ON(fd_off != priv->rx_headroom); skb_reserve(skb, fd_off); skb_put(skb, qm_fd_get_length(fd)); @@ -1770,7 +1768,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, sz = dpaa_bp->size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); skb = build_skb(sg_vaddr, sz); - if (WARN_ON(unlikely(!skb))) + if (WARN_ON(!skb)) goto free_buffers; skb->ip_summed = rx_csum_offload(priv, fd); @@ -2046,7 +2044,8 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, return 0; } -static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) +static netdev_tx_t +dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { const int queue_mapping = skb_get_queue_mapping(skb); bool 
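ftgmac100, ftmac100 and dpaa are also converted so their transmit handlers return netdev_tx_t rather than int, matching the ndo_start_xmit prototype. A skeletal example of the expected shape; the hardware hand-off is elided:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
				   struct net_device *netdev)
{
	if (!netif_carrier_ok(netdev)) {
		/* The skb is consumed either way; NETDEV_TX_BUSY is only
		 * returned when the skb is left untouched for requeueing.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* ... hand the frame to hardware here ... */
	return NETDEV_TX_OK;
}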
nonlinear = skb_is_nonlinear(skb); @@ -2493,8 +2492,7 @@ static int dpaa_phy_init(struct net_device *net_dev) /* Remove any features not supported by the controller */ phy_dev->supported &= mac_dev->if_support; - phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); - phy_dev->advertising = phy_dev->supported; + phy_support_asym_pause(phy_dev); mac_dev->phy_dev = phy_dev; net_dev->phydev = phy_dev; @@ -2733,8 +2731,6 @@ out_error: return err; } -static const struct of_device_id dpaa_match[]; - static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) { u16 headroom; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 3184c8f7cdd0..13d6e2272ece 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -182,7 +182,6 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, struct phy_device *phydev; bool rx_pause, tx_pause; struct dpaa_priv *priv; - u32 newadv, oldadv; int err; priv = netdev_priv(net_dev); @@ -194,9 +193,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, return -ENODEV; } - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - (epause->rx_pause != epause->tx_pause))) + if (!phy_validate_pause(phydev, epause)) return -EINVAL; /* The MAC should know how to handle PAUSE frame autonegotiation before @@ -210,29 +207,8 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, /* Determine the sym/asym advertised PAUSE capabilities from the desired * rx/tx pause settings. */ - newadv = 0; - if (epause->rx_pause) - newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (epause->tx_pause) - newadv ^= ADVERTISED_Asym_Pause; - oldadv = phydev->advertising & - (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - - /* If there are differences between the old and the new advertised - * values, restart PHY autonegotiation and advertise the new values. - */ - if (oldadv != newadv) { - phydev->advertising &= ~(ADVERTISED_Pause - | ADVERTISED_Asym_Pause); - phydev->advertising |= newadv; - if (phydev->autoneg) { - err = phy_start_aneg(phydev); - if (err < 0) - netdev_err(net_dev, "phy_start_aneg() = %d\n", - err); - } - } + phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig new file mode 100644 index 000000000000..809a155eb193 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig @@ -0,0 +1,16 @@ +config FSL_DPAA2_ETH + tristate "Freescale DPAA2 Ethernet" + depends on FSL_MC_BUS && FSL_MC_DPIO + help + This is the DPAA2 Ethernet driver supporting Freescale SoCs + with DPAA2 (DataPath Acceleration Architecture v2). + The driver manages network objects discovered on the Freescale + MC bus. + +config FSL_DPAA2_PTP_CLOCK + tristate "Freescale DPAA2 PTP Clock" + depends on FSL_DPAA2_ETH && POSIX_TIMERS + select PTP_1588_CLOCK + help + This driver adds support for using the DPAA2 1588 timer module + as a PTP clock. 
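The dpaa ethtool conversion above shows the new pause plumbing: phy_validate_pause() rejects impossible rx/tx combinations and phy_set_asym_pause() updates the advertised pause bits, leaving any renegotiation to phylib. A minimal set_pauseparam handler built on those helpers, with placeholder driver glue:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int demo_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct phy_device *phydev = dev->phydev;

	if (!phydev)
		return -ENODEV;

	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
	return 0;
}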
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile new file mode 100644 index 000000000000..2f424e0a8225 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Freescale DPAA2 Ethernet controller +# + +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o +obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o + +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o +fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o + +# Needed by the tracing framework +CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h new file mode 100644 index 000000000000..9801528db2a5 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2014-2015 Freescale Semiconductor Inc. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM dpaa2_eth + +#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _DPAA2_ETH_TRACE_H + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include "dpaa2-eth.h" +#include <linux/tracepoint.h> + +#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" +/* trace_printk format for raw buffer event class */ +#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d" + +/* This is used to declare a class of events. + * individual events of this type will be defined below. + */ + +/* Store details about a frame descriptor */ +DECLARE_EVENT_CLASS(dpaa2_eth_fd, + /* Trace function prototype */ + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + /* Repeat argument list here */ + TP_ARGS(netdev, fd), + + /* A structure containing the relevant information we want + * to record. Declare name and type for each normal element, + * name, type and size for arrays. Use __string for variable + * length strings. + */ + TP_STRUCT__entry( + __field(u64, fd_addr) + __field(u32, fd_len) + __field(u16, fd_offset) + __string(name, netdev->name) + ), + + /* The function that assigns values to the above declared + * fields + */ + TP_fast_assign( + __entry->fd_addr = dpaa2_fd_get_addr(fd); + __entry->fd_len = dpaa2_fd_get_len(fd); + __entry->fd_offset = dpaa2_fd_get_offset(fd); + __assign_str(name, netdev->name); + ), + + /* This is what gets printed when the trace event is + * triggered. + */ + TP_printk(TR_FMT, + __get_str(name), + __entry->fd_addr, + __entry->fd_len, + __entry->fd_offset) +); + +/* Now declare events of the above type. Format is: + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class + */ + +/* Tx (egress) fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + +/* Rx fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + +/* Tx confirmation fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + +/* Log data about raw buffers. Useful for tracing DPBP content. 
*/ +TRACE_EVENT(dpaa2_eth_buf_seed, + /* Trace function prototype */ + TP_PROTO(struct net_device *netdev, + /* virtual address and size */ + void *vaddr, + size_t size, + /* dma map address and size */ + dma_addr_t dma_addr, + size_t map_size, + /* buffer pool id, if relevant */ + u16 bpid), + + /* Repeat argument list here */ + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), + + /* A structure containing the relevant information we want + * to record. Declare name and type for each normal element, + * name, type and size for arrays. Use __string for variable + * length strings. + */ + TP_STRUCT__entry( + __field(void *, vaddr) + __field(size_t, size) + __field(dma_addr_t, dma_addr) + __field(size_t, map_size) + __field(u16, bpid) + __string(name, netdev->name) + ), + + /* The function that assigns values to the above declared + * fields + */ + TP_fast_assign( + __entry->vaddr = vaddr; + __entry->size = size; + __entry->dma_addr = dma_addr; + __entry->map_size = map_size; + __entry->bpid = bpid; + __assign_str(name, netdev->name); + ), + + /* This is what gets printed when the trace event is + * triggered. + */ + TP_printk(TR_BUF_FMT, + __get_str(name), + __entry->vaddr, + __entry->size, + &__entry->dma_addr, + __entry->map_size, + __entry->bpid) +); + +/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). + * The syntax is the same as for DECLARE_EVENT_CLASS(). + */ + +#endif /* _DPAA2_ETH_TRACE_H */ + +/* This must be outside ifdef _DPAA2_ETH_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE dpaa2-eth-trace +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c new file mode 100644 index 000000000000..88f7acce38dc --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -0,0 +1,2829 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2014-2016 Freescale Semiconductor Inc. + * Copyright 2016-2017 NXP + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/etherdevice.h> +#include <linux/of_net.h> +#include <linux/interrupt.h> +#include <linux/msi.h> +#include <linux/kthread.h> +#include <linux/iommu.h> +#include <linux/net_tstamp.h> +#include <linux/fsl/mc.h> + +#include <net/sock.h> + +#include "dpaa2-eth.h" + +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files + * using trace events only need to #include <trace/events/sched.h> + */ +#define CREATE_TRACE_POINTS +#include "dpaa2-eth-trace.h" + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); + +static void *dpaa2_iova_to_virt(struct iommu_domain *domain, + dma_addr_t iova_addr) +{ + phys_addr_t phys_addr; + + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; + + return phys_to_virt(phys_addr); +} + +static void validate_rx_csum(struct dpaa2_eth_priv *priv, + u32 fd_status, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* HW checksum validation is disabled, nothing to do here */ + if (!(priv->net_dev->features & NETIF_F_RXCSUM)) + return; + + /* Read checksum validation bits */ + if (!((fd_status & DPAA2_FAS_L3CV) && + (fd_status & DPAA2_FAS_L4CV))) + return; + + /* Inform the stack there's no need to compute L3/L4 csum anymore */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +/* Free a received FD. 
+ * Not to be used for Tx conf FDs or on any other paths. + */ +static void free_rx_fd(struct dpaa2_eth_priv *priv, + const struct dpaa2_fd *fd, + void *vaddr) +{ + struct device *dev = priv->net_dev->dev.parent; + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + struct dpaa2_sg_entry *sgt; + void *sg_vaddr; + int i; + + /* If single buffer frame, just free the data buffer */ + if (fd_format == dpaa2_fd_single) + goto free_buf; + else if (fd_format != dpaa2_fd_sg) + /* We don't support any other format */ + return; + + /* For S/G frames, we first need to free all SG entries + * except the first one, which was taken care of already + */ + sgt = vaddr + dpaa2_fd_get_offset(fd); + for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { + addr = dpaa2_sg_get_addr(&sgt[i]); + sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + + skb_free_frag(sg_vaddr); + if (dpaa2_sg_is_final(&sgt[i])) + break; + } + +free_buf: + skb_free_frag(vaddr); +} + +/* Build a linear skb based on a single-buffer frame descriptor */ +static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) +{ + struct sk_buff *skb = NULL; + u16 fd_offset = dpaa2_fd_get_offset(fd); + u32 fd_length = dpaa2_fd_get_len(fd); + + ch->buf_count--; + + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, fd_offset); + skb_put(skb, fd_length); + + return skb; +} + +/* Build a non linear (fragmented) skb based on a S/G table */ +static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_sg_entry *sgt) +{ + struct sk_buff *skb = NULL; + struct device *dev = priv->net_dev->dev.parent; + void *sg_vaddr; + dma_addr_t sg_addr; + u16 sg_offset; + u32 sg_length; + struct page *page, *head_page; + int page_offset; + int i; + + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { + struct dpaa2_sg_entry *sge = &sgt[i]; + + /* NOTE: We only support SG entries in dpaa2_sg_single format, + * but this is the only format we may receive from HW anyway + */ + + /* Get the address and length from the S/G entry */ + sg_addr = dpaa2_sg_get_addr(sge); + sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); + dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + + sg_length = dpaa2_sg_get_len(sge); + + if (i == 0) { + /* We build the skb around the first data buffer */ + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); + if (unlikely(!skb)) { + /* Free the first SG entry now, since we already + * unmapped it and obtained the virtual address + */ + skb_free_frag(sg_vaddr); + + /* We still need to subtract the buffers used + * by this FD from our software counter + */ + while (!dpaa2_sg_is_final(&sgt[i]) && + i < DPAA2_ETH_MAX_SG_ENTRIES) + i++; + break; + } + + sg_offset = dpaa2_sg_get_offset(sge); + skb_reserve(skb, sg_offset); + skb_put(skb, sg_length); + } else { + /* Rest of the data buffers are stored as skb frags */ + page = virt_to_page(sg_vaddr); + head_page = virt_to_head_page(sg_vaddr); + + /* Offset in page (which may be compound). + * Data in subsequent SG entries is stored from the + * beginning of the buffer, so we don't need to add the + * sg_offset. 
+ */ + page_offset = ((unsigned long)sg_vaddr & + (PAGE_SIZE - 1)) + + (page_address(page) - page_address(head_page)); + + skb_add_rx_frag(skb, i - 1, head_page, page_offset, + sg_length, DPAA2_ETH_RX_BUF_SIZE); + } + + if (dpaa2_sg_is_final(sge)) + break; + } + + WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); + + /* Count all data buffers + SG table buffer */ + ch->buf_count -= i + 2; + + return skb; +} + +/* Main Rx frame processing routine */ +static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct napi_struct *napi, + u16 queue_id) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + void *vaddr; + struct sk_buff *skb; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_fas *fas; + void *buf_data; + u32 status = 0; + + /* Tracing point */ + trace_dpaa2_rx_fd(priv->net_dev, fd); + + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); + + fas = dpaa2_get_fas(vaddr, false); + prefetch(fas); + buf_data = vaddr + dpaa2_fd_get_offset(fd); + prefetch(buf_data); + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = this_cpu_ptr(priv->percpu_extras); + + if (fd_format == dpaa2_fd_single) { + skb = build_linear_skb(ch, fd, vaddr); + } else if (fd_format == dpaa2_fd_sg) { + skb = build_frag_skb(priv, ch, buf_data); + skb_free_frag(vaddr); + percpu_extras->rx_sg_frames++; + percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); + } else { + /* We don't support any other format */ + goto err_frame_format; + } + + if (unlikely(!skb)) + goto err_build_skb; + + prefetch(skb->data); + + /* Get the timestamp value */ + if (priv->rx_tstamp) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + __le64 *ts = dpaa2_get_ts(vaddr, false); + u64 ns; + + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); + shhwtstamps->hwtstamp = ns_to_ktime(ns); + } + + /* Check if we need to validate the L4 csum */ + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { + status = le32_to_cpu(fas->status); + validate_rx_csum(priv, status, skb); + } + + skb->protocol = eth_type_trans(skb, priv->net_dev); + skb_record_rx_queue(skb, queue_id); + + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + + napi_gro_receive(napi, skb); + + return; + +err_build_skb: + free_rx_fd(priv, fd, vaddr); +err_frame_format: + percpu_stats->rx_dropped++; +} + +/* Consume all frames pull-dequeued into the store. This is the simplest way to + * make sure we don't accidentally issue another volatile dequeue which would + * overwrite (leak) frames already in the store. + * + * Observance of NAPI budget is not our concern, leaving that to the caller. 
+ */ +static int consume_frames(struct dpaa2_eth_channel *ch, + enum dpaa2_eth_fq_type *type) +{ + struct dpaa2_eth_priv *priv = ch->priv; + struct dpaa2_eth_fq *fq = NULL; + struct dpaa2_dq *dq; + const struct dpaa2_fd *fd; + int cleaned = 0; + int is_last; + + do { + dq = dpaa2_io_store_next(ch->store, &is_last); + if (unlikely(!dq)) { + /* If we're here, we *must* have placed a + * volatile dequeue comnmand, so keep reading through + * the store until we get some sort of valid response + * token (either a valid frame or an "empty dequeue") + */ + continue; + } + + fd = dpaa2_dq_fd(dq); + fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); + + fq->consume(priv, ch, fd, &ch->napi, fq->flowid); + cleaned++; + } while (!is_last); + + if (!cleaned) + return 0; + + fq->stats.frames += cleaned; + ch->stats.frames += cleaned; + + /* A dequeue operation only pulls frames from a single queue + * into the store. Return the frame queue type as an out param. + */ + if (type) + *type = fq->type; + + return cleaned; +} + +/* Configure the egress frame annotation for timestamp update */ +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) +{ + struct dpaa2_faead *faead; + u32 ctrl, frc; + + /* Mark the egress frame annotation area as valid */ + frc = dpaa2_fd_get_frc(fd); + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); + + /* Set hardware annotation size */ + ctrl = dpaa2_fd_get_ctrl(fd); + dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); + + /* enable UPD (update prepanded data) bit in FAEAD field of + * hardware frame annotation area + */ + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; + faead = dpaa2_get_faead(buf_start, true); + faead->ctrl = cpu_to_le32(ctrl); +} + +/* Create a frame descriptor based on a fragmented skb */ +static int build_sg_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) +{ + struct device *dev = priv->net_dev->dev.parent; + void *sgt_buf = NULL; + dma_addr_t addr; + int nr_frags = skb_shinfo(skb)->nr_frags; + struct dpaa2_sg_entry *sgt; + int i, err; + int sgt_buf_size; + struct scatterlist *scl, *crt_scl; + int num_sg; + int num_dma_bufs; + struct dpaa2_eth_swa *swa; + + /* Create and map scatterlist. + * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have + * to go beyond nr_frags+1. + * Note: We don't support chained scatterlists + */ + if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) + return -EINVAL; + + scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); + if (unlikely(!scl)) + return -ENOMEM; + + sg_init_table(scl, nr_frags + 1); + num_sg = skb_to_sgvec(skb, scl, 0, skb->len); + num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); + if (unlikely(!num_dma_bufs)) { + err = -ENOMEM; + goto dma_map_sg_failed; + } + + /* Prepare the HW SGT structure */ + sgt_buf_size = priv->tx_data_offset + + sizeof(struct dpaa2_sg_entry) * num_dma_bufs; + sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN); + if (unlikely(!sgt_buf)) { + err = -ENOMEM; + goto sgt_buf_alloc_failed; + } + sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); + memset(sgt_buf, 0, sgt_buf_size); + + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); + + /* Fill in the HW SGT structure. 
+ * + * sgt_buf is zeroed out, so the following fields are implicit + * in all sgt entries: + * - offset is 0 + * - format is 'dpaa2_sg_single' + */ + for_each_sg(scl, crt_scl, num_dma_bufs, i) { + dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); + dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); + } + dpaa2_sg_set_final(&sgt[i - 1], true); + + /* Store the skb backpointer in the SGT buffer. + * Fit the scatterlist and the number of buffers alongside the + * skb backpointer in the software annotation area. We'll need + * all of them on Tx Conf. + */ + swa = (struct dpaa2_eth_swa *)sgt_buf; + swa->skb = skb; + swa->scl = scl; + swa->num_sg = num_sg; + swa->sgt_size = sgt_buf_size; + + /* Separately map the SGT buffer */ + addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) { + err = -ENOMEM; + goto dma_map_single_failed; + } + dpaa2_fd_set_offset(fd, priv->tx_data_offset); + dpaa2_fd_set_format(fd, dpaa2_fd_sg); + dpaa2_fd_set_addr(fd, addr); + dpaa2_fd_set_len(fd, skb->len); + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); + + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + enable_tx_tstamp(fd, sgt_buf); + + return 0; + +dma_map_single_failed: + skb_free_frag(sgt_buf); +sgt_buf_alloc_failed: + dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); +dma_map_sg_failed: + kfree(scl); + return err; +} + +/* Create a frame descriptor based on a linear skb */ +static int build_single_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) +{ + struct device *dev = priv->net_dev->dev.parent; + u8 *buffer_start, *aligned_start; + struct sk_buff **skbh; + dma_addr_t addr; + + buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); + + /* If there's enough room to align the FD address, do it. + * It will help hardware optimize accesses. + */ + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, + DPAA2_ETH_TX_BUF_ALIGN); + if (aligned_start >= skb->head) + buffer_start = aligned_start; + + /* Store a backpointer to the skb at the beginning of the buffer + * (in the private data area) such that we can release it + * on Tx confirm + */ + skbh = (struct sk_buff **)buffer_start; + *skbh = skb; + + addr = dma_map_single(dev, buffer_start, + skb_tail_pointer(skb) - buffer_start, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) + return -ENOMEM; + + dpaa2_fd_set_addr(fd, addr); + dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); + dpaa2_fd_set_len(fd, skb->len); + dpaa2_fd_set_format(fd, dpaa2_fd_single); + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); + + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + enable_tx_tstamp(fd, buffer_start); + + return 0; +} + +/* FD freeing routine on the Tx path + * + * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb + * back-pointed to is also freed. + * This can be called either from dpaa2_eth_tx_conf() or on the error path of + * dpaa2_eth_tx(). 
+ */ +static void free_tx_fd(const struct dpaa2_eth_priv *priv, + const struct dpaa2_fd *fd) +{ + struct device *dev = priv->net_dev->dev.parent; + dma_addr_t fd_addr; + struct sk_buff **skbh, *skb; + unsigned char *buffer_start; + struct dpaa2_eth_swa *swa; + u8 fd_format = dpaa2_fd_get_format(fd); + + fd_addr = dpaa2_fd_get_addr(fd); + skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); + + if (fd_format == dpaa2_fd_single) { + skb = *skbh; + buffer_start = (unsigned char *)skbh; + /* Accessing the skb buffer is safe before dma unmap, because + * we didn't map the actual skb shell. + */ + dma_unmap_single(dev, fd_addr, + skb_tail_pointer(skb) - buffer_start, + DMA_BIDIRECTIONAL); + } else if (fd_format == dpaa2_fd_sg) { + swa = (struct dpaa2_eth_swa *)skbh; + skb = swa->skb; + + /* Unmap the scatterlist */ + dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL); + kfree(swa->scl); + + /* Unmap the SGT buffer */ + dma_unmap_single(dev, fd_addr, swa->sgt_size, + DMA_BIDIRECTIONAL); + } else { + netdev_dbg(priv->net_dev, "Invalid FD format\n"); + return; + } + + /* Get the timestamp value */ + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + struct skb_shared_hwtstamps shhwtstamps; + __le64 *ts = dpaa2_get_ts(skbh, true); + u64 ns; + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + + ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + } + + /* Free SGT buffer allocated on tx */ + if (fd_format != dpaa2_fd_single) + skb_free_frag(skbh); + + /* Move on with skb release */ + dev_kfree_skb(skb); +} + +static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpaa2_fd fd; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + struct dpaa2_eth_fq *fq; + u16 queue_mapping; + unsigned int needed_headroom; + int err, i; + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = this_cpu_ptr(priv->percpu_extras); + + needed_headroom = dpaa2_eth_needed_headroom(priv, skb); + if (skb_headroom(skb) < needed_headroom) { + struct sk_buff *ns; + + ns = skb_realloc_headroom(skb, needed_headroom); + if (unlikely(!ns)) { + percpu_stats->tx_dropped++; + goto err_alloc_headroom; + } + percpu_extras->tx_reallocs++; + + if (skb->sk) + skb_set_owner_w(ns, skb->sk); + + dev_kfree_skb(skb); + skb = ns; + } + + /* We'll be holding a back-reference to the skb until Tx Confirmation; + * we don't want that overwritten by a concurrent Tx with a cloned skb. + */ + skb = skb_unshare(skb, GFP_ATOMIC); + if (unlikely(!skb)) { + /* skb_unshare() has already freed the skb */ + percpu_stats->tx_dropped++; + return NETDEV_TX_OK; + } + + /* Setup the FD fields */ + memset(&fd, 0, sizeof(fd)); + + if (skb_is_nonlinear(skb)) { + err = build_sg_fd(priv, skb, &fd); + percpu_extras->tx_sg_frames++; + percpu_extras->tx_sg_bytes += skb->len; + } else { + err = build_single_fd(priv, skb, &fd); + } + + if (unlikely(err)) { + percpu_stats->tx_dropped++; + goto err_build_fd; + } + + /* Tracing point */ + trace_dpaa2_tx_fd(net_dev, &fd); + + /* TxConf FQ selection relies on queue id from the stack. 
+ * In case of a forwarded frame from another DPNI interface, we choose + * a queue affined to the same core that processed the Rx frame + */ + queue_mapping = skb_get_queue_mapping(skb); + fq = &priv->fq[queue_mapping]; + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { + err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, + priv->tx_qdid, 0, + fq->tx_qdbin, &fd); + if (err != -EBUSY) + break; + } + percpu_extras->tx_portal_busy += i; + if (unlikely(err < 0)) { + percpu_stats->tx_errors++; + /* Clean up everything, including freeing the skb */ + free_tx_fd(priv, &fd); + } else { + percpu_stats->tx_packets++; + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); + } + + return NETDEV_TX_OK; + +err_build_fd: +err_alloc_headroom: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* Tx confirmation frame processing routine */ +static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch __always_unused, + const struct dpaa2_fd *fd, + struct napi_struct *napi __always_unused, + u16 queue_id __always_unused) +{ + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + u32 fd_errors; + + /* Tracing point */ + trace_dpaa2_tx_conf_fd(priv->net_dev, fd); + + percpu_extras = this_cpu_ptr(priv->percpu_extras); + percpu_extras->tx_conf_frames++; + percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); + + /* Check frame errors in the FD field */ + fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; + free_tx_fd(priv, fd); + + if (likely(!fd_errors)) + return; + + if (net_ratelimit()) + netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", + fd_errors); + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + /* Tx-conf logically pertains to the egress path. */ + percpu_stats->tx_errors++; +} + +static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) +{ + int err; + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_RX_L3_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, + "dpni_set_offload(RX_L3_CSUM) failed\n"); + return err; + } + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_RX_L4_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, + "dpni_set_offload(RX_L4_CSUM) failed\n"); + return err; + } + + return 0; +} + +static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) +{ + int err; + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_TX_L3_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); + return err; + } + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_TX_L4_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); + return err; + } + + return 0; +} + +/* Free buffers acquired from the buffer pool or which were meant to + * be released in the pool + */ +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) +{ + struct device *dev = priv->net_dev->dev.parent; + void *vaddr; + int i; + + for (i = 0; i < count; i++) { + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); + dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + skb_free_frag(vaddr); + } +} + +/* Perform a single release command to add buffers + * to the specified buffer pool + */ +static int add_bufs(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, u16 bpid) +{ + struct device *dev = priv->net_dev->dev.parent; + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + void *buf; + dma_addr_t addr; + int i, err; + + for (i = 
0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { + /* Allocate buffer visible to WRIOP + skb shared info + + * alignment padding + */ + buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv)); + if (unlikely(!buf)) + goto err_alloc; + + buf = PTR_ALIGN(buf, priv->rx_buf_align); + + addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; + + buf_array[i] = addr; + + /* tracing point */ + trace_dpaa2_eth_buf_seed(priv->net_dev, + buf, dpaa2_eth_buf_raw_size(priv), + addr, DPAA2_ETH_RX_BUF_SIZE, + bpid); + } + +release_bufs: + /* In case the portal is busy, retry until successful */ + while ((err = dpaa2_io_service_release(ch->dpio, bpid, + buf_array, i)) == -EBUSY) + cpu_relax(); + + /* If release command failed, clean up and bail out; + * not much else we can do about it + */ + if (err) { + free_bufs(priv, buf_array, i); + return 0; + } + + return i; + +err_map: + skb_free_frag(buf); +err_alloc: + /* If we managed to allocate at least some buffers, + * release them to hardware + */ + if (i) + goto release_bufs; + + return 0; +} + +static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) +{ + int i, j; + int new_count; + + /* This is the lazy seeding of Rx buffer pools. + * dpaa2_add_bufs() is also used on the Rx hotpath and calls + * napi_alloc_frag(). The trouble with that is that it in turn ends up + * calling this_cpu_ptr(), which mandates execution in atomic context. + * Rather than splitting up the code, do a one-off preempt disable. + */ + preempt_disable(); + for (j = 0; j < priv->num_channels; j++) { + for (i = 0; i < DPAA2_ETH_NUM_BUFS; + i += DPAA2_ETH_BUFS_PER_CMD) { + new_count = add_bufs(priv, priv->channel[j], bpid); + priv->channel[j]->buf_count += new_count; + + if (new_count < DPAA2_ETH_BUFS_PER_CMD) { + preempt_enable(); + return -ENOMEM; + } + } + } + preempt_enable(); + + return 0; +} + +/** + * Drain the specified number of buffers from the DPNI's private buffer pool. 
+ * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD + */ +static void drain_bufs(struct dpaa2_eth_priv *priv, int count) +{ + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + int ret; + + do { + ret = dpaa2_io_service_acquire(NULL, priv->bpid, + buf_array, count); + if (ret < 0) { + netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); + return; + } + free_bufs(priv, buf_array, ret); + } while (ret); +} + +static void drain_pool(struct dpaa2_eth_priv *priv) +{ + int i; + + drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); + drain_bufs(priv, 1); + + for (i = 0; i < priv->num_channels; i++) + priv->channel[i]->buf_count = 0; +} + +/* Function is called from softirq context only, so we don't need to guard + * the access to percpu count + */ +static int refill_pool(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + u16 bpid) +{ + int new_count; + + if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) + return 0; + + do { + new_count = add_bufs(priv, ch, bpid); + if (unlikely(!new_count)) { + /* Out of memory; abort for now, we'll try later on */ + break; + } + ch->buf_count += new_count; + } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); + + if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) + return -ENOMEM; + + return 0; +} + +static int pull_channel(struct dpaa2_eth_channel *ch) +{ + int err; + int dequeues = -1; + + /* Retry while portal is busy */ + do { + err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, + ch->store); + dequeues++; + cpu_relax(); + } while (err == -EBUSY); + + ch->stats.dequeue_portal_busy += dequeues; + if (unlikely(err)) + ch->stats.pull_err++; + + return err; +} + +/* NAPI poll routine + * + * Frames are dequeued from the QMan channel associated with this NAPI context. + * Rx, Tx confirmation and (if configured) Rx error frames all count + * towards the NAPI budget. + */ +static int dpaa2_eth_poll(struct napi_struct *napi, int budget) +{ + struct dpaa2_eth_channel *ch; + struct dpaa2_eth_priv *priv; + int rx_cleaned = 0, txconf_cleaned = 0; + enum dpaa2_eth_fq_type type = 0; + int store_cleaned; + int err; + + ch = container_of(napi, struct dpaa2_eth_channel, napi); + priv = ch->priv; + + do { + err = pull_channel(ch); + if (unlikely(err)) + break; + + /* Refill pool if appropriate */ + refill_pool(priv, ch, priv->bpid); + + store_cleaned = consume_frames(ch, &type); + if (type == DPAA2_RX_FQ) + rx_cleaned += store_cleaned; + else + txconf_cleaned += store_cleaned; + + /* If we either consumed the whole NAPI budget with Rx frames + * or we reached the Tx confirmations threshold, we're done. 
+ */ + if (rx_cleaned >= budget || + txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) + return budget; + } while (store_cleaned); + + /* We didn't consume the entire budget, so finish napi and + * re-enable data availability notifications + */ + napi_complete_done(napi, rx_cleaned); + do { + err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); + cpu_relax(); + } while (err == -EBUSY); + WARN_ONCE(err, "CDAN notifications rearm failed on core %d", + ch->nctx.desired_cpu); + + return max(rx_cleaned, 1); +} + +static void enable_ch_napi(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *ch; + int i; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + napi_enable(&ch->napi); + } +} + +static void disable_ch_napi(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *ch; + int i; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + napi_disable(&ch->napi); + } +} + +static int link_state_update(struct dpaa2_eth_priv *priv) +{ + struct dpni_link_state state = {0}; + int err; + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (unlikely(err)) { + netdev_err(priv->net_dev, + "dpni_get_link_state() failed\n"); + return err; + } + + /* Chech link state; speed / duplex changes are not treated yet */ + if (priv->link_state.up == state.up) + return 0; + + priv->link_state = state; + if (state.up) { + netif_carrier_on(priv->net_dev); + netif_tx_start_all_queues(priv->net_dev); + } else { + netif_tx_stop_all_queues(priv->net_dev); + netif_carrier_off(priv->net_dev); + } + + netdev_info(priv->net_dev, "Link Event: state %s\n", + state.up ? "up" : "down"); + + return 0; +} + +static int dpaa2_eth_open(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err; + + err = seed_pool(priv, priv->bpid); + if (err) { + /* Not much to do; the buffer pool, though not filled up, + * may still contain some buffers which would enable us + * to limp on. + */ + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", + priv->dpbp_dev->obj_desc.id, priv->bpid); + } + + /* We'll only start the txqs when the link is actually ready; make sure + * we don't race against the link up notification, which may come + * immediately after dpni_enable(); + */ + netif_tx_stop_all_queues(net_dev); + enable_ch_napi(priv); + /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will + * return true and cause 'ip link show' to report the LOWER_UP flag, + * even though the link notification wasn't even received. + */ + netif_carrier_off(net_dev); + + err = dpni_enable(priv->mc_io, 0, priv->mc_token); + if (err < 0) { + netdev_err(net_dev, "dpni_enable() failed\n"); + goto enable_err; + } + + /* If the DPMAC object has already processed the link up interrupt, + * we have to learn the link state ourselves. + */ + err = link_state_update(priv); + if (err < 0) { + netdev_err(net_dev, "Can't update link state\n"); + goto link_state_err; + } + + return 0; + +link_state_err: +enable_err: + disable_ch_napi(priv); + drain_pool(priv); + return err; +} + +/* The DPIO store must be empty when we call this, + * at the end of every NAPI cycle. 
+ */ +static u32 drain_channel(struct dpaa2_eth_channel *ch) +{ + u32 drained = 0, total = 0; + + do { + pull_channel(ch); + drained = consume_frames(ch, NULL); + total += drained; + } while (drained); + + return total; +} + +static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *ch; + int i; + u32 drained = 0; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + drained += drain_channel(ch); + } + + return drained; +} + +static int dpaa2_eth_stop(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int dpni_enabled = 0; + int retries = 10; + u32 drained; + + netif_tx_stop_all_queues(net_dev); + netif_carrier_off(net_dev); + + /* Loop while dpni_disable() attempts to drain the egress FQs + * and confirm them back to us. + */ + do { + dpni_disable(priv->mc_io, 0, priv->mc_token); + dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); + if (dpni_enabled) + /* Allow the hardware some slack */ + msleep(100); + } while (dpni_enabled && --retries); + if (!retries) { + netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); + /* Must go on and disable NAPI nonetheless, so we don't crash at + * the next "ifconfig up" + */ + } + + /* Wait for NAPI to complete on every core and disable it. + * In particular, this will also prevent NAPI from being rescheduled if + * a new CDAN is serviced, effectively discarding the CDAN. We therefore + * don't even need to disarm the channels, except perhaps for the case + * of a huge coalescing value. + */ + disable_ch_napi(priv); + + /* Manually drain the Rx and TxConf queues */ + drained = drain_ingress_frames(priv); + if (drained) + netdev_dbg(net_dev, "Drained %d frames.\n", drained); + + /* Empty the buffer pool */ + drain_pool(priv); + + return 0; +} + +static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + int err; + + err = eth_mac_addr(net_dev, addr); + if (err < 0) { + dev_err(dev, "eth_mac_addr() failed (%d)\n", err); + return err; + } + + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, + net_dev->dev_addr); + if (err) { + dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); + return err; + } + + return 0; +} + +/** Fill in counters maintained by the GPP driver. These may be different from + * the hardware counters obtained by ethtool. + */ +static void dpaa2_eth_get_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct rtnl_link_stats64 *percpu_stats; + u64 *cpustats; + u64 *netstats = (u64 *)stats; + int i, j; + int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); + + for_each_possible_cpu(i) { + percpu_stats = per_cpu_ptr(priv->percpu_stats, i); + cpustats = (u64 *)percpu_stats; + for (j = 0; j < num; j++) + netstats[j] += cpustats[j]; + } +} + +/* Copy mac unicast addresses from @net_dev to @priv. + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
+ */ +static void add_uc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) +{ + struct netdev_hw_addr *ha; + int err; + + netdev_for_each_uc_addr(ha, net_dev) { + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, + ha->addr); + if (err) + netdev_warn(priv->net_dev, + "Could not add ucast MAC %pM to the filtering table (err %d)\n", + ha->addr, err); + } +} + +/* Copy mac multicast addresses from @net_dev to @priv + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. + */ +static void add_mc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) +{ + struct netdev_hw_addr *ha; + int err; + + netdev_for_each_mc_addr(ha, net_dev) { + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, + ha->addr); + if (err) + netdev_warn(priv->net_dev, + "Could not add mcast MAC %pM to the filtering table (err %d)\n", + ha->addr, err); + } +} + +static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int uc_count = netdev_uc_count(net_dev); + int mc_count = netdev_mc_count(net_dev); + u8 max_mac = priv->dpni_attrs.mac_filter_entries; + u32 options = priv->dpni_attrs.options; + u16 mc_token = priv->mc_token; + struct fsl_mc_io *mc_io = priv->mc_io; + int err; + + /* Basic sanity checks; these probably indicate a misconfiguration */ + if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) + netdev_info(net_dev, + "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", + max_mac); + + /* Force promiscuous if the uc or mc counts exceed our capabilities. */ + if (uc_count > max_mac) { + netdev_info(net_dev, + "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", + uc_count, max_mac); + goto force_promisc; + } + if (mc_count + uc_count > max_mac) { + netdev_info(net_dev, + "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", + uc_count + mc_count, max_mac); + goto force_mc_promisc; + } + + /* Adjust promisc settings due to flag combinations */ + if (net_dev->flags & IFF_PROMISC) + goto force_promisc; + if (net_dev->flags & IFF_ALLMULTI) { + /* First, rebuild unicast filtering table. This should be done + * in promisc mode, in order to avoid frame loss while we + * progressively add entries to the table. + * We don't know whether we had been in promisc already, and + * making an MC call to find out is expensive; so set uc promisc + * nonetheless. + */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set uc promisc\n"); + + /* Actual uc table reconstruction. */ + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); + if (err) + netdev_warn(net_dev, "Can't clear uc filters\n"); + add_uc_hw_addr(net_dev, priv); + + /* Finally, clear uc promisc and set mc promisc as requested. */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); + if (err) + netdev_warn(net_dev, "Can't clear uc promisc\n"); + goto force_mc_promisc; + } + + /* Neither unicast, nor multicast promisc will be on... eventually. + * For now, rebuild mac filtering tables while forcing both of them on. 
+ */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); + + /* Actual mac filtering tables reconstruction */ + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); + if (err) + netdev_warn(net_dev, "Can't clear mac filters\n"); + add_mc_hw_addr(net_dev, priv); + add_uc_hw_addr(net_dev, priv); + + /* Now we can clear both ucast and mcast promisc, without risking + * to drop legitimate frames anymore. + */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); + if (err) + netdev_warn(net_dev, "Can't clear ucast promisc\n"); + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); + if (err) + netdev_warn(net_dev, "Can't clear mcast promisc\n"); + + return; + +force_promisc: + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set ucast promisc\n"); +force_mc_promisc: + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set mcast promisc\n"); +} + +static int dpaa2_eth_set_features(struct net_device *net_dev, + netdev_features_t features) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + netdev_features_t changed = features ^ net_dev->features; + bool enable; + int err; + + if (changed & NETIF_F_RXCSUM) { + enable = !!(features & NETIF_F_RXCSUM); + err = set_rx_csum(priv, enable); + if (err) + return err; + } + + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { + enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); + err = set_tx_csum(priv, enable); + if (err) + return err; + } + + return 0; +} + +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->tx_tstamp = false; + break; + case HWTSTAMP_TX_ON: + priv->tx_tstamp = true; + break; + default: + return -ERANGE; + } + + if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + priv->rx_tstamp = false; + } else { + priv->rx_tstamp = true; + /* TS is set for all frame types, not only those requested */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + if (cmd == SIOCSHWTSTAMP) + return dpaa2_eth_ts_ioctl(dev, rq, cmd); + + return -EINVAL; +} + +static const struct net_device_ops dpaa2_eth_ops = { + .ndo_open = dpaa2_eth_open, + .ndo_start_xmit = dpaa2_eth_tx, + .ndo_stop = dpaa2_eth_stop, + .ndo_set_mac_address = dpaa2_eth_set_addr, + .ndo_get_stats64 = dpaa2_eth_get_stats, + .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, + .ndo_set_features = dpaa2_eth_set_features, + .ndo_do_ioctl = dpaa2_eth_ioctl, +}; + +static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) +{ + struct dpaa2_eth_channel *ch; + + ch = container_of(ctx, struct dpaa2_eth_channel, nctx); + + /* Update NAPI statistics */ + ch->stats.cdan++; + + napi_schedule_irqoff(&ch->napi); +} + +/* Allocate and configure a DPCON object */ +static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) +{ + struct fsl_mc_device *dpcon; + struct device *dev = priv->net_dev->dev.parent; + struct dpcon_attr attrs; + int err; + + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), + FSL_MC_POOL_DPCON, &dpcon); + if (err) { + dev_info(dev, "Not enough DPCONs, will go on as-is\n"); + return NULL; + } + + err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_open() failed\n"); + goto free; + } + + err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_reset() failed\n"); + goto close; + } + + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); + if (err) { + dev_err(dev, "dpcon_get_attributes() failed\n"); + goto close; + } + + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_enable() failed\n"); + goto close; + } + + return dpcon; + +close: + dpcon_close(priv->mc_io, 0, dpcon->mc_handle); +free: + fsl_mc_object_free(dpcon); + + return NULL; +} + +static void free_dpcon(struct dpaa2_eth_priv *priv, + struct fsl_mc_device *dpcon) +{ + dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); + dpcon_close(priv->mc_io, 0, dpcon->mc_handle); + fsl_mc_object_free(dpcon); +} + +static struct dpaa2_eth_channel * +alloc_channel(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *channel; + struct dpcon_attr attr; + struct device *dev = priv->net_dev->dev.parent; + int err; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + channel->dpcon = setup_dpcon(priv); + if (!channel->dpcon) + goto err_setup; + + err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, + &attr); + if (err) { + dev_err(dev, "dpcon_get_attributes() failed\n"); + goto err_get_attr; + } + + channel->dpcon_id = attr.id; + channel->ch_id = attr.qbman_ch_id; + channel->priv = priv; + + return channel; + +err_get_attr: + free_dpcon(priv, channel->dpcon); +err_setup: + kfree(channel); + return NULL; +} + +static void free_channel(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *channel) +{ + free_dpcon(priv, channel->dpcon); + kfree(channel); +} + +/* DPIO setup: allocate and configure QBMan channels, setup core affinity + * and register data availability notifications + */ +static int setup_dpio(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_io_notification_ctx *nctx; + struct dpaa2_eth_channel *channel; + struct dpcon_notification_cfg dpcon_notif_cfg; + struct device *dev = priv->net_dev->dev.parent; + int i, err; + + /* We want the ability to spread ingress traffic (RX, TX conf) to as + * many cores as possible, so we need 
one channel for each core + * (unless there's fewer queues than cores, in which case the extra + * channels would be wasted). + * Allocate one channel per core and register it to the core's + * affine DPIO. If not enough channels are available for all cores + * or if some cores don't have an affine DPIO, there will be no + * ingress frame processing on those cores. + */ + cpumask_clear(&priv->dpio_cpumask); + for_each_online_cpu(i) { + /* Try to allocate a channel */ + channel = alloc_channel(priv); + if (!channel) { + dev_info(dev, + "No affine channel for cpu %d and above\n", i); + err = -ENODEV; + goto err_alloc_ch; + } + + priv->channel[priv->num_channels] = channel; + + nctx = &channel->nctx; + nctx->is_cdan = 1; + nctx->cb = cdan_cb; + nctx->id = channel->ch_id; + nctx->desired_cpu = i; + + /* Register the new context */ + channel->dpio = dpaa2_io_service_select(i); + err = dpaa2_io_service_register(channel->dpio, nctx); + if (err) { + dev_dbg(dev, "No affine DPIO for cpu %d\n", i); + /* If no affine DPIO for this core, there's probably + * none available for next cores either. Signal we want + * to retry later, in case the DPIO devices weren't + * probed yet. + */ + err = -EPROBE_DEFER; + goto err_service_reg; + } + + /* Register DPCON notification with MC */ + dpcon_notif_cfg.dpio_id = nctx->dpio_id; + dpcon_notif_cfg.priority = 0; + dpcon_notif_cfg.user_ctx = nctx->qman64; + err = dpcon_set_notification(priv->mc_io, 0, + channel->dpcon->mc_handle, + &dpcon_notif_cfg); + if (err) { + dev_err(dev, "dpcon_set_notification failed()\n"); + goto err_set_cdan; + } + + /* If we managed to allocate a channel and also found an affine + * DPIO for this core, add it to the final mask + */ + cpumask_set_cpu(i, &priv->dpio_cpumask); + priv->num_channels++; + + /* Stop if we already have enough channels to accommodate all + * RX and TX conf queues + */ + if (priv->num_channels == dpaa2_eth_queue_count(priv)) + break; + } + + return 0; + +err_set_cdan: + dpaa2_io_service_deregister(channel->dpio, nctx); +err_service_reg: + free_channel(priv, channel); +err_alloc_ch: + if (cpumask_empty(&priv->dpio_cpumask)) { + dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); + return err; + } + + dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", + cpumask_pr_args(&priv->dpio_cpumask)); + + return 0; +} + +static void free_dpio(struct dpaa2_eth_priv *priv) +{ + int i; + struct dpaa2_eth_channel *ch; + + /* deregister CDAN notifications and free channels */ + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + dpaa2_io_service_deregister(ch->dpio, &ch->nctx); + free_channel(priv, ch); + } +} + +static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, + int cpu) +{ + struct device *dev = priv->net_dev->dev.parent; + int i; + + for (i = 0; i < priv->num_channels; i++) + if (priv->channel[i]->nctx.desired_cpu == cpu) + return priv->channel[i]; + + /* We should never get here. Issue a warning and return + * the first channel, because it's still better than nothing + */ + dev_warn(dev, "No affine channel found for cpu %d\n", cpu); + + return priv->channel[0]; +} + +static void set_fq_affinity(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + struct cpumask xps_mask; + struct dpaa2_eth_fq *fq; + int rx_cpu, txc_cpu; + int i, err; + + /* For each FQ, pick one channel/CPU to deliver frames to. + * This may well change at runtime, either through irqbalance or + * through direct user intervention. 
+ */ + rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + switch (fq->type) { + case DPAA2_RX_FQ: + fq->target_cpu = rx_cpu; + rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); + if (rx_cpu >= nr_cpu_ids) + rx_cpu = cpumask_first(&priv->dpio_cpumask); + break; + case DPAA2_TX_CONF_FQ: + fq->target_cpu = txc_cpu; + + /* Tell the stack to affine to txc_cpu the Tx queue + * associated with the confirmation one + */ + cpumask_clear(&xps_mask); + cpumask_set_cpu(txc_cpu, &xps_mask); + err = netif_set_xps_queue(priv->net_dev, &xps_mask, + fq->flowid); + if (err) + dev_err(dev, "Error setting XPS queue\n"); + + txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); + if (txc_cpu >= nr_cpu_ids) + txc_cpu = cpumask_first(&priv->dpio_cpumask); + break; + default: + dev_err(dev, "Unknown FQ type: %d\n", fq->type); + } + fq->channel = get_affine_channel(priv, fq->target_cpu); + } +} + +static void setup_fqs(struct dpaa2_eth_priv *priv) +{ + int i; + + /* We have one TxConf FQ per Tx flow. + * The number of Tx and Rx queues is the same. + * Tx queues come first in the fq array. + */ + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { + priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; + priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; + priv->fq[priv->num_fqs++].flowid = (u16)i; + } + + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; + priv->fq[priv->num_fqs++].flowid = (u16)i; + } + + /* For each FQ, decide on which core to process incoming frames */ + set_fq_affinity(priv); +} + +/* Allocate and configure one buffer pool for each interface */ +static int setup_dpbp(struct dpaa2_eth_priv *priv) +{ + int err; + struct fsl_mc_device *dpbp_dev; + struct device *dev = priv->net_dev->dev.parent; + struct dpbp_attr dpbp_attrs; + + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, + &dpbp_dev); + if (err) { + dev_err(dev, "DPBP device allocation failed\n"); + return err; + } + + priv->dpbp_dev = dpbp_dev; + + err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, + &dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_open() failed\n"); + goto err_open; + } + + err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_reset() failed\n"); + goto err_reset; + } + + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_enable() failed\n"); + goto err_enable; + } + + err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, + &dpbp_attrs); + if (err) { + dev_err(dev, "dpbp_get_attributes() failed\n"); + goto err_get_attr; + } + priv->bpid = dpbp_attrs.bpid; + + return 0; + +err_get_attr: + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); +err_enable: +err_reset: + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); +err_open: + fsl_mc_object_free(dpbp_dev); + + return err; +} + +static void free_dpbp(struct dpaa2_eth_priv *priv) +{ + drain_pool(priv); + dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); + dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); + fsl_mc_object_free(priv->dpbp_dev); +} + +static int set_buffer_layout(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_buffer_layout buf_layout = {0}; + int err; + + /* We need to check for WRIOP version 1.0.0, but depending on the MC + * version, this number is not always provided correctly on rev1. 
+ * We need to check for both alternatives in this situation. + */ + if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || + priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) + priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; + else + priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; + + /* tx buffer */ + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; + buf_layout.pass_timestamp = true; + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, &buf_layout); + if (err) { + dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); + return err; + } + + /* tx-confirm buffer */ + buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX_CONFIRM, &buf_layout); + if (err) { + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); + return err; + } + + /* Now that we've set our tx buffer layout, retrieve the minimum + * required tx data offset. + */ + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, + &priv->tx_data_offset); + if (err) { + dev_err(dev, "dpni_get_tx_data_offset() failed\n"); + return err; + } + + if ((priv->tx_data_offset % 64) != 0) + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", + priv->tx_data_offset); + + /* rx buffer */ + buf_layout.pass_frame_status = true; + buf_layout.pass_parser_result = true; + buf_layout.data_align = priv->rx_buf_align; + buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); + buf_layout.private_data_size = 0; + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, &buf_layout); + if (err) { + dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); + return err; + } + + return 0; +} + +/* Configure the DPNI object this interface is associated with */ +static int setup_dpni(struct fsl_mc_device *ls_dev) +{ + struct device *dev = &ls_dev->dev; + struct dpaa2_eth_priv *priv; + struct net_device *net_dev; + int err; + + net_dev = dev_get_drvdata(dev); + priv = netdev_priv(net_dev); + + /* get a handle for the DPNI object */ + err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); + if (err) { + dev_err(dev, "dpni_open() failed\n"); + return err; + } + + /* Check if we can work with this DPNI object */ + err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, + &priv->dpni_ver_minor); + if (err) { + dev_err(dev, "dpni_get_api_version() failed\n"); + goto close; + } + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { + dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", + priv->dpni_ver_major, priv->dpni_ver_minor, + DPNI_VER_MAJOR, DPNI_VER_MINOR); + err = -ENOTSUPP; + goto close; + } + + ls_dev->mc_io = priv->mc_io; + ls_dev->mc_handle = priv->mc_token; + + err = dpni_reset(priv->mc_io, 0, priv->mc_token); + if (err) { + dev_err(dev, "dpni_reset() failed\n"); + goto close; + } + + err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, + &priv->dpni_attrs); + if (err) { + dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); + goto close; + } + + err = set_buffer_layout(priv); + if (err) + goto close; + + priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * + dpaa2_eth_fs_count(priv), GFP_KERNEL); + if 
(!priv->cls_rules) + goto close; + + return 0; + +close: + dpni_close(priv->mc_io, 0, priv->mc_token); + + return err; +} + +static void free_dpni(struct dpaa2_eth_priv *priv) +{ + int err; + + err = dpni_reset(priv->mc_io, 0, priv->mc_token); + if (err) + netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", + err); + + dpni_close(priv->mc_io, 0, priv->mc_token); +} + +static int setup_rx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_queue queue; + struct dpni_queue_id qid; + struct dpni_taildrop td; + int err; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid); + if (err) { + dev_err(dev, "dpni_get_queue(RX) failed\n"); + return err; + } + + fq->fqid = qid.fqid; + + queue.destination.id = fq->channel->dpcon_id; + queue.destination.type = DPNI_DEST_DPCON; + queue.destination.priority = 1; + queue.user_context = (u64)(uintptr_t)fq; + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, 0, fq->flowid, + DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, + &queue); + if (err) { + dev_err(dev, "dpni_set_queue(RX) failed\n"); + return err; + } + + td.enable = 1; + td.threshold = DPAA2_ETH_TAILDROP_THRESH; + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE, + DPNI_QUEUE_RX, 0, fq->flowid, &td); + if (err) { + dev_err(dev, "dpni_set_threshold() failed\n"); + return err; + } + + return 0; +} + +static int setup_tx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_queue queue; + struct dpni_queue_id qid; + int err; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid); + if (err) { + dev_err(dev, "dpni_get_queue(TX) failed\n"); + return err; + } + + fq->tx_qdbin = qid.qdbin; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, + &queue, &qid); + if (err) { + dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); + return err; + } + + fq->fqid = qid.fqid; + + queue.destination.id = fq->channel->dpcon_id; + queue.destination.type = DPNI_DEST_DPCON; + queue.destination.priority = 0; + queue.user_context = (u64)(uintptr_t)fq; + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, + DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, + &queue); + if (err) { + dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); + return err; + } + + return 0; +} + +/* Supported header fields for Rx hash distribution key */ +static const struct dpaa2_eth_dist_fields dist_fields[] = { + { + /* L2 header */ + .rxnfc_field = RXH_L2DA, + .cls_prot = NET_PROT_ETH, + .cls_field = NH_FLD_ETH_DA, + .size = 6, + }, { + .cls_prot = NET_PROT_ETH, + .cls_field = NH_FLD_ETH_SA, + .size = 6, + }, { + /* This is the last ethertype field parsed: + * depending on frame format, it can be the MAC ethertype + * or the VLAN etype. 
+ */ + .cls_prot = NET_PROT_ETH, + .cls_field = NH_FLD_ETH_TYPE, + .size = 2, + }, { + /* VLAN header */ + .rxnfc_field = RXH_VLAN, + .cls_prot = NET_PROT_VLAN, + .cls_field = NH_FLD_VLAN_TCI, + .size = 2, + }, { + /* IP header */ + .rxnfc_field = RXH_IP_SRC, + .cls_prot = NET_PROT_IP, + .cls_field = NH_FLD_IP_SRC, + .size = 4, + }, { + .rxnfc_field = RXH_IP_DST, + .cls_prot = NET_PROT_IP, + .cls_field = NH_FLD_IP_DST, + .size = 4, + }, { + .rxnfc_field = RXH_L3_PROTO, + .cls_prot = NET_PROT_IP, + .cls_field = NH_FLD_IP_PROTO, + .size = 1, + }, { + /* Using UDP ports, this is functionally equivalent to raw + * byte pairs from L4 header. + */ + .rxnfc_field = RXH_L4_B_0_1, + .cls_prot = NET_PROT_UDP, + .cls_field = NH_FLD_UDP_PORT_SRC, + .size = 2, + }, { + .rxnfc_field = RXH_L4_B_2_3, + .cls_prot = NET_PROT_UDP, + .cls_field = NH_FLD_UDP_PORT_DST, + .size = 2, + }, +}; + +/* Configure the Rx hash key using the legacy API */ +static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_rx_tc_dist_cfg dist_cfg; + int err; + + memset(&dist_cfg, 0, sizeof(dist_cfg)); + + dist_cfg.key_cfg_iova = key; + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; + + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); + if (err) + dev_err(dev, "dpni_set_rx_tc_dist failed\n"); + + return err; +} + +/* Configure the Rx hash key using the new API */ +static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_rx_dist_cfg dist_cfg; + int err; + + memset(&dist_cfg, 0, sizeof(dist_cfg)); + + dist_cfg.key_cfg_iova = key; + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); + dist_cfg.enable = 1; + + err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); + if (err) + dev_err(dev, "dpni_set_rx_hash_dist failed\n"); + + return err; +} + +/* Configure the Rx flow classification key */ +static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_rx_dist_cfg dist_cfg; + int err; + + memset(&dist_cfg, 0, sizeof(dist_cfg)); + + dist_cfg.key_cfg_iova = key; + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); + dist_cfg.enable = 1; + + err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); + if (err) + dev_err(dev, "dpni_set_rx_fs_dist failed\n"); + + return err; +} + +/* Size of the Rx flow classification key */ +int dpaa2_eth_cls_key_size(void) +{ + int i, size = 0; + + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) + size += dist_fields[i].size; + + return size; +} + +/* Offset of header field in Rx classification key */ +int dpaa2_eth_cls_fld_off(int prot, int field) +{ + int i, off = 0; + + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { + if (dist_fields[i].cls_prot == prot && + dist_fields[i].cls_field == field) + return off; + off += dist_fields[i].size; + } + + WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); + return 0; +} + +/* Set Rx distribution (hash or flow classification) key + * flags is a combination of RXH_ bits + */ +static int dpaa2_eth_set_dist_key(struct net_device *net_dev, + enum dpaa2_eth_rx_dist type, u64 flags) +{ + struct device *dev = net_dev->dev.parent; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpkg_profile_cfg cls_cfg; + u32 rx_hash_fields = 0; + dma_addr_t key_iova; + u8 *dma_mem; + int i; + int err = 0; + + 
memset(&cls_cfg, 0, sizeof(cls_cfg)); + + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { + struct dpkg_extract *key = + &cls_cfg.extracts[cls_cfg.num_extracts]; + + /* For Rx hashing key we set only the selected fields. + * For Rx flow classification key we set all supported fields + */ + if (type == DPAA2_ETH_RX_DIST_HASH) { + if (!(flags & dist_fields[i].rxnfc_field)) + continue; + rx_hash_fields |= dist_fields[i].rxnfc_field; + } + + if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { + dev_err(dev, "error adding key extraction rule, too many rules?\n"); + return -E2BIG; + } + + key->type = DPKG_EXTRACT_FROM_HDR; + key->extract.from_hdr.prot = dist_fields[i].cls_prot; + key->extract.from_hdr.type = DPKG_FULL_FIELD; + key->extract.from_hdr.field = dist_fields[i].cls_field; + cls_cfg.num_extracts++; + } + + dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); + if (!dma_mem) + return -ENOMEM; + + err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); + if (err) { + dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); + goto free_key; + } + + /* Prepare for setting the rx dist */ + key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, key_iova)) { + dev_err(dev, "DMA mapping failed\n"); + err = -ENOMEM; + goto free_key; + } + + if (type == DPAA2_ETH_RX_DIST_HASH) { + if (dpaa2_eth_has_legacy_dist(priv)) + err = config_legacy_hash_key(priv, key_iova); + else + err = config_hash_key(priv, key_iova); + } else { + err = config_cls_key(priv, key_iova); + } + + dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, + DMA_TO_DEVICE); + if (!err && type == DPAA2_ETH_RX_DIST_HASH) + priv->rx_hash_fields = rx_hash_fields; + +free_key: + kfree(dma_mem); + return err; +} + +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + if (!dpaa2_eth_hash_enabled(priv)) + return -EOPNOTSUPP; + + return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags); +} + +static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + + /* Check if we actually support Rx flow classification */ + if (dpaa2_eth_has_legacy_dist(priv)) { + dev_dbg(dev, "Rx cls not supported by current MC version\n"); + return -EOPNOTSUPP; + } + + if (priv->dpni_attrs.options & DPNI_OPT_NO_FS || + !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) { + dev_dbg(dev, "Rx cls disabled in DPNI options\n"); + return -EOPNOTSUPP; + } + + if (!dpaa2_eth_hash_enabled(priv)) { + dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n"); + return -EOPNOTSUPP; + } + + priv->rx_cls_enabled = 1; + + return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0); +} + +/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, + * frame queues and channels + */ +static int bind_dpni(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + struct dpni_pools_cfg pools_params; + struct dpni_error_cfg err_cfg; + int err = 0; + int i; + + pools_params.num_dpbp = 1; + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; + pools_params.pools[0].backup_pool = 0; + pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) { + dev_err(dev, "dpni_set_pools() failed\n"); + return err; + } + + /* have the interface implicitly distribute traffic based on + * the default hash key + */ + err = 
dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); + if (err && err != -EOPNOTSUPP) + dev_err(dev, "Failed to configure hashing\n"); + + /* Configure the flow classification key; it includes all + * supported header fields and cannot be modified at runtime + */ + err = dpaa2_eth_set_cls(priv); + if (err && err != -EOPNOTSUPP) + dev_err(dev, "Failed to configure Rx classification key\n"); + + /* Configure handling of error frames */ + err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; + err_cfg.set_frame_annotation = 1; + err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; + err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, + &err_cfg); + if (err) { + dev_err(dev, "dpni_set_errors_behavior failed\n"); + return err; + } + + /* Configure Rx and Tx conf queues to generate CDANs */ + for (i = 0; i < priv->num_fqs; i++) { + switch (priv->fq[i].type) { + case DPAA2_RX_FQ: + err = setup_rx_flow(priv, &priv->fq[i]); + break; + case DPAA2_TX_CONF_FQ: + err = setup_tx_flow(priv, &priv->fq[i]); + break; + default: + dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); + return -EINVAL; + } + if (err) + return err; + } + + err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, &priv->tx_qdid); + if (err) { + dev_err(dev, "dpni_get_qdid() failed\n"); + return err; + } + + return 0; +} + +/* Allocate rings for storing incoming frame descriptors */ +static int alloc_rings(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + int i; + + for (i = 0; i < priv->num_channels; i++) { + priv->channel[i]->store = + dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); + if (!priv->channel[i]->store) { + netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); + goto err_ring; + } + } + + return 0; + +err_ring: + for (i = 0; i < priv->num_channels; i++) { + if (!priv->channel[i]->store) + break; + dpaa2_io_store_destroy(priv->channel[i]->store); + } + + return -ENOMEM; +} + +static void free_rings(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_channels; i++) + dpaa2_io_store_destroy(priv->channel[i]->store); +} + +static int set_mac_addr(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN]; + int err; + + /* Get firmware address, if any */ + err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); + if (err) { + dev_err(dev, "dpni_get_port_mac_addr() failed\n"); + return err; + } + + /* Get DPNI attributes address, if any */ + err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, + dpni_mac_addr); + if (err) { + dev_err(dev, "dpni_get_primary_mac_addr() failed\n"); + return err; + } + + /* First check if firmware has any address configured by bootloader */ + if (!is_zero_ether_addr(mac_addr)) { + /* If the DPMAC addr != DPNI addr, update it */ + if (!ether_addr_equal(mac_addr, dpni_mac_addr)) { + err = dpni_set_primary_mac_addr(priv->mc_io, 0, + priv->mc_token, + mac_addr); + if (err) { + dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); + return err; + } + } + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + } else if (is_zero_ether_addr(dpni_mac_addr)) { + /* No MAC address configured, fill in net_dev->dev_addr + * with a random one + */ + eth_hw_addr_random(net_dev); + dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); + + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, + net_dev->dev_addr); + if (err) { + 
dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); + return err; + } + + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all + * practical purposes, this will be our "permanent" mac address, + * at least until the next reboot. This move will also permit + * register_netdevice() to properly fill up net_dev->perm_addr. + */ + net_dev->addr_assign_type = NET_ADDR_PERM; + } else { + /* NET_ADDR_PERM is default, all we have to do is + * fill in the device addr. + */ + memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); + } + + return 0; +} + +static int netdev_init(struct net_device *net_dev) +{ + struct device *dev = net_dev->dev.parent; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + u32 options = priv->dpni_attrs.options; + u64 supported = 0, not_supported = 0; + u8 bcast_addr[ETH_ALEN]; + u8 num_queues; + int err; + + net_dev->netdev_ops = &dpaa2_eth_ops; + net_dev->ethtool_ops = &dpaa2_ethtool_ops; + + err = set_mac_addr(priv); + if (err) + return err; + + /* Explicitly add the broadcast address to the MAC filtering table */ + eth_broadcast_addr(bcast_addr); + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); + if (err) { + dev_err(dev, "dpni_add_mac_addr() failed\n"); + return err; + } + + /* Set MTU upper limit; lower limit is 68B (default value) */ + net_dev->max_mtu = DPAA2_ETH_MAX_MTU; + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, + DPAA2_ETH_MFL); + if (err) { + dev_err(dev, "dpni_set_max_frame_length() failed\n"); + return err; + } + + /* Set actual number of queues in the net device */ + num_queues = dpaa2_eth_queue_count(priv); + err = netif_set_real_num_tx_queues(net_dev, num_queues); + if (err) { + dev_err(dev, "netif_set_real_num_tx_queues() failed\n"); + return err; + } + err = netif_set_real_num_rx_queues(net_dev, num_queues); + if (err) { + dev_err(dev, "netif_set_real_num_rx_queues() failed\n"); + return err; + } + + /* Capabilities listing */ + supported |= IFF_LIVE_ADDR_CHANGE; + + if (options & DPNI_OPT_NO_MAC_FILTER) + not_supported |= IFF_UNICAST_FLT; + else + supported |= IFF_UNICAST_FLT; + + net_dev->priv_flags |= supported; + net_dev->priv_flags &= ~not_supported; + + /* Features */ + net_dev->features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_LLTX; + net_dev->hw_features = net_dev->features; + + return 0; +} + +static int poll_link_state(void *arg) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; + int err; + + while (!kthread_should_stop()) { + err = link_state_update(priv); + if (unlikely(err)) + return err; + + msleep(DPAA2_ETH_LINK_STATE_REFRESH); + } + + return 0; +} + +static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) +{ + u32 status = ~0; + struct device *dev = (struct device *)arg; + struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); + struct net_device *net_dev = dev_get_drvdata(dev); + int err; + + err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, + DPNI_IRQ_INDEX, &status); + if (unlikely(err)) { + netdev_err(net_dev, "Can't get irq status (err %d)\n", err); + return IRQ_HANDLED; + } + + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) + link_state_update(netdev_priv(net_dev)); + + return IRQ_HANDLED; +} + +static int setup_irqs(struct fsl_mc_device *ls_dev) +{ + int err = 0; + struct fsl_mc_device_irq *irq; + + err = fsl_mc_allocate_irqs(ls_dev); + if (err) { + dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); + return err; + } + + irq = ls_dev->irqs[0]; + err = 
devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, + NULL, dpni_irq0_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(&ls_dev->dev), &ls_dev->dev); + if (err < 0) { + dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err); + goto free_mc_irq; + } + + err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, + DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED); + if (err < 0) { + dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); + goto free_irq; + } + + err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, + DPNI_IRQ_INDEX, 1); + if (err < 0) { + dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err); + goto free_irq; + } + + return 0; + +free_irq: + devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); +free_mc_irq: + fsl_mc_free_irqs(ls_dev); + + return err; +} + +static void add_ch_napi(struct dpaa2_eth_priv *priv) +{ + int i; + struct dpaa2_eth_channel *ch; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ + netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, + NAPI_POLL_WEIGHT); + } +} + +static void del_ch_napi(struct dpaa2_eth_priv *priv) +{ + int i; + struct dpaa2_eth_channel *ch; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + netif_napi_del(&ch->napi); + } +} + +static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) +{ + struct device *dev; + struct net_device *net_dev = NULL; + struct dpaa2_eth_priv *priv = NULL; + int err = 0; + + dev = &dpni_dev->dev; + + /* Net device */ + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); + if (!net_dev) { + dev_err(dev, "alloc_etherdev_mq() failed\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(net_dev, dev); + dev_set_drvdata(dev, net_dev); + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + + priv->iommu_domain = iommu_get_domain_for_dev(dev); + + /* Obtain a MC portal */ + err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, + &priv->mc_io); + if (err) { + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "MC portal allocation failed\n"); + goto err_portal_alloc; + } + + /* MC objects initialization and configuration */ + err = setup_dpni(dpni_dev); + if (err) + goto err_dpni_setup; + + err = setup_dpio(priv); + if (err) + goto err_dpio_setup; + + setup_fqs(priv); + + err = setup_dpbp(priv); + if (err) + goto err_dpbp_setup; + + err = bind_dpni(priv); + if (err) + goto err_bind; + + /* Add a NAPI context for each channel */ + add_ch_napi(priv); + + /* Percpu statistics */ + priv->percpu_stats = alloc_percpu(*priv->percpu_stats); + if (!priv->percpu_stats) { + dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); + err = -ENOMEM; + goto err_alloc_percpu_stats; + } + priv->percpu_extras = alloc_percpu(*priv->percpu_extras); + if (!priv->percpu_extras) { + dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); + err = -ENOMEM; + goto err_alloc_percpu_extras; + } + + err = netdev_init(net_dev); + if (err) + goto err_netdev_init; + + /* Configure checksum offload based on current interface flags */ + err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); + if (err) + goto err_csum; + + err = set_tx_csum(priv, !!(net_dev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); + if (err) + goto err_csum; + + err = alloc_rings(priv); + if (err) + goto err_alloc_rings; + + err = setup_irqs(dpni_dev); + if (err) { + netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); + 
priv->poll_thread = kthread_run(poll_link_state, priv, + "%s_poll_link", net_dev->name); + if (IS_ERR(priv->poll_thread)) { + dev_err(dev, "Error starting polling thread\n"); + goto err_poll_thread; + } + priv->do_link_poll = true; + } + + err = register_netdev(net_dev); + if (err < 0) { + dev_err(dev, "register_netdev() failed\n"); + goto err_netdev_reg; + } + + dev_info(dev, "Probed interface %s\n", net_dev->name); + return 0; + +err_netdev_reg: + if (priv->do_link_poll) + kthread_stop(priv->poll_thread); + else + fsl_mc_free_irqs(dpni_dev); +err_poll_thread: + free_rings(priv); +err_alloc_rings: +err_csum: +err_netdev_init: + free_percpu(priv->percpu_extras); +err_alloc_percpu_extras: + free_percpu(priv->percpu_stats); +err_alloc_percpu_stats: + del_ch_napi(priv); +err_bind: + free_dpbp(priv); +err_dpbp_setup: + free_dpio(priv); +err_dpio_setup: + free_dpni(priv); +err_dpni_setup: + fsl_mc_portal_free(priv->mc_io); +err_portal_alloc: + dev_set_drvdata(dev, NULL); + free_netdev(net_dev); + + return err; +} + +static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) +{ + struct device *dev; + struct net_device *net_dev; + struct dpaa2_eth_priv *priv; + + dev = &ls_dev->dev; + net_dev = dev_get_drvdata(dev); + priv = netdev_priv(net_dev); + + unregister_netdev(net_dev); + + if (priv->do_link_poll) + kthread_stop(priv->poll_thread); + else + fsl_mc_free_irqs(ls_dev); + + free_rings(priv); + free_percpu(priv->percpu_stats); + free_percpu(priv->percpu_extras); + + del_ch_napi(priv); + free_dpbp(priv); + free_dpio(priv); + free_dpni(priv); + + fsl_mc_portal_free(priv->mc_io); + + free_netdev(net_dev); + + dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); + + return 0; +} + +static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dpni", + }, + { .vendor = 0x0 } +}; +MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); + +static struct fsl_mc_driver dpaa2_eth_driver = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = dpaa2_eth_probe, + .remove = dpaa2_eth_remove, + .match_id_table = dpaa2_eth_match_id_table +}; + +module_fsl_mc_driver(dpaa2_eth_driver); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h new file mode 100644 index 000000000000..452a8e9c4f0e --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -0,0 +1,446 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2014-2016 Freescale Semiconductor Inc. + * Copyright 2016 NXP + */ + +#ifndef __DPAA2_ETH_H +#define __DPAA2_ETH_H + +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/fsl/mc.h> + +#include <soc/fsl/dpaa2-io.h> +#include <soc/fsl/dpaa2-fd.h> +#include "dpni.h" +#include "dpni-cmd.h" + +#include "dpaa2-eth-trace.h" + +#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0) + +#define DPAA2_ETH_STORE_SIZE 16 + +/* Maximum number of scatter-gather entries in an ingress frame, + * considering the maximum receive frame size is 64K + */ +#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) + +/* Maximum acceptable MTU value. It is in direct relation with the hardware + * enforced Max Frame Length (currently 10k). 
+ */ +#define DPAA2_ETH_MFL (10 * 1024) +#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) +/* Convert L3 MTU to L2 MFL */ +#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN) + +/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo + * frames in the Rx queues (length of the current frame is not + * taken into account when making the taildrop decision) + */ +#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) + +/* Maximum number of Tx confirmation frames to be processed + * in a single NAPI call + */ +#define DPAA2_ETH_TXCONF_PER_NAPI 256 + +/* Buffer quota per queue. Must be large enough such that for minimum sized + * frames taildrop kicks in before the bpool gets depleted, so we compute + * how many 64B frames fit inside the taildrop threshold and add a margin + * to accommodate the buffer refill delay. + */ +#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) +#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) +#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE + +/* Maximum number of buffers that can be acquired/released through a single + * QBMan command + */ +#define DPAA2_ETH_BUFS_PER_CMD 7 + +/* Hardware requires alignment for ingress/egress buffer addresses */ +#define DPAA2_ETH_TX_BUF_ALIGN 64 + +#define DPAA2_ETH_RX_BUF_SIZE 2048 +#define DPAA2_ETH_SKB_SIZE \ + (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +/* Hardware annotation area in RX/TX buffers */ +#define DPAA2_ETH_RX_HWA_SIZE 64 +#define DPAA2_ETH_TX_HWA_SIZE 128 + +/* PTP nominal frequency 1GHz */ +#define DPAA2_PTP_CLK_PERIOD_NS 1 + +/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned + * to 256B. For newer revisions, the requirement is only for 64B alignment + */ +#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256 +#define DPAA2_ETH_RX_BUF_ALIGN 64 + +/* We are accommodating a skb backpointer and some S/G info + * in the frame's software annotation. The hardware + * options are either 0 or 64, so we choose the latter. 
+ */ +#define DPAA2_ETH_SWA_SIZE 64 + +/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ +struct dpaa2_eth_swa { + struct sk_buff *skb; + struct scatterlist *scl; + int num_sg; + int sgt_size; +}; + +/* Annotation valid bits in FD FRC */ +#define DPAA2_FD_FRC_FASV 0x8000 +#define DPAA2_FD_FRC_FAEADV 0x4000 +#define DPAA2_FD_FRC_FAPRV 0x2000 +#define DPAA2_FD_FRC_FAIADV 0x1000 +#define DPAA2_FD_FRC_FASWOV 0x0800 +#define DPAA2_FD_FRC_FAICFDV 0x0400 + +/* Error bits in FD CTRL */ +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR) +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \ + FD_CTRL_SBE | \ + FD_CTRL_FSE | \ + FD_CTRL_FAERR) + +/* Annotation bits in FD CTRL */ +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128B */ + +/* Frame annotation status */ +struct dpaa2_fas { + u8 reserved; + u8 ppid; + __le16 ifpid; + __le32 status; +}; + +/* Frame annotation status word is located in the first 8 bytes + * of the buffer's hardware annoatation area + */ +#define DPAA2_FAS_OFFSET 0 +#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas)) + +/* Timestamp is located in the next 8 bytes of the buffer's + * hardware annotation area + */ +#define DPAA2_TS_OFFSET 0x8 + +/* Frame annotation egress action descriptor */ +#define DPAA2_FAEAD_OFFSET 0x58 + +struct dpaa2_faead { + __le32 conf_fqid; + __le32 ctrl; +}; + +#define DPAA2_FAEAD_A2V 0x20000000 +#define DPAA2_FAEAD_UPDV 0x00001000 +#define DPAA2_FAEAD_UPD 0x00000010 + +/* Accessors for the hardware annotation fields that we use */ +static inline void *dpaa2_get_hwa(void *buf_addr, bool swa) +{ + return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0); +} + +static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa) +{ + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET; +} + +static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa) +{ + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET; +} + +static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa) +{ + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET; +} + +/* Error and status bits in the frame annotation status word */ +/* Debug frame, otherwise supposed to be discarded */ +#define DPAA2_FAS_DISC 0x80000000 +/* MACSEC frame */ +#define DPAA2_FAS_MS 0x40000000 +#define DPAA2_FAS_PTP 0x08000000 +/* Ethernet multicast frame */ +#define DPAA2_FAS_MC 0x04000000 +/* Ethernet broadcast frame */ +#define DPAA2_FAS_BC 0x02000000 +#define DPAA2_FAS_KSE 0x00040000 +#define DPAA2_FAS_EOFHE 0x00020000 +#define DPAA2_FAS_MNLE 0x00010000 +#define DPAA2_FAS_TIDE 0x00008000 +#define DPAA2_FAS_PIEE 0x00004000 +/* Frame length error */ +#define DPAA2_FAS_FLE 0x00002000 +/* Frame physical error */ +#define DPAA2_FAS_FPE 0x00001000 +#define DPAA2_FAS_PTE 0x00000080 +#define DPAA2_FAS_ISP 0x00000040 +#define DPAA2_FAS_PHE 0x00000020 +#define DPAA2_FAS_BLE 0x00000010 +/* L3 csum validation performed */ +#define DPAA2_FAS_L3CV 0x00000008 +/* L3 csum error */ +#define DPAA2_FAS_L3CE 0x00000004 +/* L4 csum validation performed */ +#define DPAA2_FAS_L4CV 0x00000002 +/* L4 csum error */ +#define DPAA2_FAS_L4CE 0x00000001 +/* Possible errors on the ingress path */ +#define DPAA2_FAS_RX_ERR_MASK (DPAA2_FAS_KSE | \ + DPAA2_FAS_EOFHE | \ + DPAA2_FAS_MNLE | \ + DPAA2_FAS_TIDE | \ + DPAA2_FAS_PIEE | \ + DPAA2_FAS_FLE | \ + DPAA2_FAS_FPE | \ + DPAA2_FAS_PTE | \ + DPAA2_FAS_ISP | \ + DPAA2_FAS_PHE | \ + DPAA2_FAS_BLE | \ + DPAA2_FAS_L3CE | \ + DPAA2_FAS_L4CE) + +/* Time in milliseconds between link state updates */ +#define DPAA2_ETH_LINK_STATE_REFRESH 1000 + +/* Number of 
times to retry a frame enqueue before giving up. + * Value determined empirically, in order to minimize the number + * of frames dropped on Tx + */ +#define DPAA2_ETH_ENQUEUE_RETRIES 10 + +/* Driver statistics, other than those in struct rtnl_link_stats64. + * These are usually collected per-CPU and aggregated by ethtool. + */ +struct dpaa2_eth_drv_stats { + __u64 tx_conf_frames; + __u64 tx_conf_bytes; + __u64 tx_sg_frames; + __u64 tx_sg_bytes; + __u64 tx_reallocs; + __u64 rx_sg_frames; + __u64 rx_sg_bytes; + /* Enqueues retried due to portal busy */ + __u64 tx_portal_busy; +}; + +/* Per-FQ statistics */ +struct dpaa2_eth_fq_stats { + /* Number of frames received on this queue */ + __u64 frames; +}; + +/* Per-channel statistics */ +struct dpaa2_eth_ch_stats { + /* Volatile dequeues retried due to portal busy */ + __u64 dequeue_portal_busy; + /* Number of CDANs; useful to estimate avg NAPI len */ + __u64 cdan; + /* Number of frames received on queues from this channel */ + __u64 frames; + /* Pull errors */ + __u64 pull_err; +}; + +/* Maximum number of queues associated with a DPNI */ +#define DPAA2_ETH_MAX_RX_QUEUES 16 +#define DPAA2_ETH_MAX_TX_QUEUES 16 +#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ + DPAA2_ETH_MAX_TX_QUEUES) + +#define DPAA2_ETH_MAX_DPCONS 16 + +enum dpaa2_eth_fq_type { + DPAA2_RX_FQ = 0, + DPAA2_TX_CONF_FQ, +}; + +struct dpaa2_eth_priv; + +struct dpaa2_eth_fq { + u32 fqid; + u32 tx_qdbin; + u16 flowid; + int target_cpu; + struct dpaa2_eth_channel *channel; + enum dpaa2_eth_fq_type type; + + void (*consume)(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct napi_struct *napi, + u16 queue_id); + struct dpaa2_eth_fq_stats stats; +}; + +struct dpaa2_eth_channel { + struct dpaa2_io_notification_ctx nctx; + struct fsl_mc_device *dpcon; + int dpcon_id; + int ch_id; + struct napi_struct napi; + struct dpaa2_io *dpio; + struct dpaa2_io_store *store; + struct dpaa2_eth_priv *priv; + int buf_count; + struct dpaa2_eth_ch_stats stats; +}; + +struct dpaa2_eth_dist_fields { + u64 rxnfc_field; + enum net_prot cls_prot; + int cls_field; + int size; +}; + +struct dpaa2_eth_cls_rule { + struct ethtool_rx_flow_spec fs; + u8 in_use; +}; + +/* Driver private data */ +struct dpaa2_eth_priv { + struct net_device *net_dev; + + u8 num_fqs; + struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; + + u8 num_channels; + struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; + + struct dpni_attr dpni_attrs; + u16 dpni_ver_major; + u16 dpni_ver_minor; + u16 tx_data_offset; + + struct fsl_mc_device *dpbp_dev; + u16 bpid; + struct iommu_domain *iommu_domain; + + bool tx_tstamp; /* Tx timestamping enabled */ + bool rx_tstamp; /* Rx timestamping enabled */ + + u16 tx_qdid; + u16 rx_buf_align; + struct fsl_mc_io *mc_io; + /* Cores which have an affine DPIO/DPCON. 
+ * This is the cpu set on which Rx and Tx conf frames are processed + */ + struct cpumask dpio_cpumask; + + /* Standard statistics */ + struct rtnl_link_stats64 __percpu *percpu_stats; + /* Extra stats, in addition to the ones known by the kernel */ + struct dpaa2_eth_drv_stats __percpu *percpu_extras; + + u16 mc_token; + + struct dpni_link_state link_state; + bool do_link_poll; + struct task_struct *poll_thread; + + /* enabled ethtool hashing bits */ + u64 rx_hash_fields; + struct dpaa2_eth_cls_rule *cls_rules; + u8 rx_cls_enabled; +}; + +#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ + | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ + | RXH_L4_B_2_3) + +/* default Rx hash options, set during probing */ +#define DPAA2_RXH_DEFAULT (RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \ + RXH_L4_B_0_1 | RXH_L4_B_2_3) + +#define dpaa2_eth_hash_enabled(priv) \ + ((priv)->dpni_attrs.num_queues > 1) + +/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */ +#define DPAA2_CLASSIFIER_DMA_SIZE 256 + +extern const struct ethtool_ops dpaa2_ethtool_ops; +extern int dpaa2_phc_index; + +static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv, + u16 ver_major, u16 ver_minor) +{ + if (priv->dpni_ver_major == ver_major) + return priv->dpni_ver_minor - ver_minor; + return priv->dpni_ver_major - ver_major; +} + +/* Minimum firmware version that supports a more flexible API + * for configuring the Rx flow hash key + */ +#define DPNI_RX_DIST_KEY_VER_MAJOR 7 +#define DPNI_RX_DIST_KEY_VER_MINOR 5 + +#define dpaa2_eth_has_legacy_dist(priv) \ + (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \ + DPNI_RX_DIST_KEY_VER_MINOR) < 0) + +#define dpaa2_eth_fs_count(priv) \ + ((priv)->dpni_attrs.fs_entries) + +enum dpaa2_eth_rx_dist { + DPAA2_ETH_RX_DIST_HASH, + DPAA2_ETH_RX_DIST_CLS +}; + +/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around + * the buffer also needs space for its shared info struct, and we need + * to allocate enough to accommodate hardware alignment restrictions + */ +static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv) +{ + return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align; +} + +static inline +unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, + struct sk_buff *skb) +{ + unsigned int headroom = DPAA2_ETH_SWA_SIZE; + + /* For non-linear skbs we have no headroom requirement, as we build a + * SG frame with a newly allocated SGT buffer + */ + if (skb_is_nonlinear(skb)) + return 0; + + /* If we have Tx timestamping, need 128B hardware annotation */ + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + headroom += DPAA2_ETH_TX_HWA_SIZE; + + return headroom; +} + +/* Extra headroom space requested to hardware, in order to make sure there's + * no realloc'ing in forwarding scenarios + */ +static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv) +{ + return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - + DPAA2_ETH_RX_HWA_SIZE; +} + +static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) +{ + return priv->dpni_attrs.num_queues; +} + +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); +int dpaa2_eth_cls_key_size(void); +int dpaa2_eth_cls_fld_off(int prot, int field); + +#endif /* __DPAA2_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c new file mode 100644 index 000000000000..26bd5a2bd8ed --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -0,0 +1,630 @@ +// 
SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2014-2016 Freescale Semiconductor Inc. + * Copyright 2016 NXP + */ + +#include <linux/net_tstamp.h> + +#include "dpni.h" /* DPNI_LINK_OPT_* */ +#include "dpaa2-eth.h" + +/* To be kept in sync with DPNI statistics */ +static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { + "[hw] rx frames", + "[hw] rx bytes", + "[hw] rx mcast frames", + "[hw] rx mcast bytes", + "[hw] rx bcast frames", + "[hw] rx bcast bytes", + "[hw] tx frames", + "[hw] tx bytes", + "[hw] tx mcast frames", + "[hw] tx mcast bytes", + "[hw] tx bcast frames", + "[hw] tx bcast bytes", + "[hw] rx filtered frames", + "[hw] rx discarded frames", + "[hw] rx nobuffer discards", + "[hw] tx discarded frames", + "[hw] tx confirmed frames", +}; + +#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) + +static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { + /* per-cpu stats */ + "[drv] tx conf frames", + "[drv] tx conf bytes", + "[drv] tx sg frames", + "[drv] tx sg bytes", + "[drv] tx realloc frames", + "[drv] rx sg frames", + "[drv] rx sg bytes", + "[drv] enqueue portal busy", + /* Channel stats */ + "[drv] dequeue portal busy", + "[drv] channel pull errors", + "[drv] cdan", +}; + +#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) + +static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *drvinfo) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor); + + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), + sizeof(drvinfo->bus_info)); +} + +static int +dpaa2_eth_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *link_settings) +{ + struct dpni_link_state state = {0}; + int err = 0; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (err) { + netdev_err(net_dev, "ERROR %d getting link state\n", err); + goto out; + } + + /* At the moment, we have no way of interrogating the DPMAC + * from the DPNI side - and for that matter there may exist + * no DPMAC at all. So for now we just don't report anything + * beyond the DPNI attributes. + */ + if (state.options & DPNI_LINK_OPT_AUTONEG) + link_settings->base.autoneg = AUTONEG_ENABLE; + if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) + link_settings->base.duplex = DUPLEX_FULL; + link_settings->base.speed = state.rate; + +out: + return err; +} + +#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7 +#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1 +static int +dpaa2_eth_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *link_settings) +{ + struct dpni_link_cfg cfg = {0}; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = 0; + + /* If using an older MC version, the DPNI must be down + * in order to be able to change link settings. Taking steps to let + * the user know that. 
+ */ + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR, + DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) { + if (netif_running(net_dev)) { + netdev_info(net_dev, "Interface must be brought down first.\n"); + return -EACCES; + } + } + + cfg.rate = link_settings->base.speed; + if (link_settings->base.autoneg == AUTONEG_ENABLE) + cfg.options |= DPNI_LINK_OPT_AUTONEG; + else + cfg.options &= ~DPNI_LINK_OPT_AUTONEG; + if (link_settings->base.duplex == DUPLEX_HALF) + cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; + else + cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; + + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); + if (err) + /* ethtool will be loud enough if we return an error; no point + * in putting our own error message on the console by default + */ + netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err); + + return err; +} + +static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { + strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { + strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; + default: + return -EOPNOTSUPP; + } +} + +/** Fill in hardware counters, as returned by MC. + */ +static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, + struct ethtool_stats *stats, + u64 *data) +{ + int i = 0; + int j, k, err; + int num_cnt; + union dpni_statistics dpni_stats; + u64 cdan = 0; + u64 portal_busy = 0, pull_err = 0; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpaa2_eth_drv_stats *extras; + struct dpaa2_eth_ch_stats *ch_stats; + + memset(data, 0, + sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); + + /* Print standard counters, from DPNI statistics */ + for (j = 0; j <= 2; j++) { + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, + j, &dpni_stats); + if (err != 0) + netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j); + switch (j) { + case 0: + num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64); + break; + case 1: + num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64); + break; + case 2: + num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64); + break; + } + for (k = 0; k < num_cnt; k++) + *(data + i++) = dpni_stats.raw.counter[k]; + } + + /* Print per-cpu extra stats */ + for_each_online_cpu(k) { + extras = per_cpu_ptr(priv->percpu_extras, k); + for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) + *((__u64 *)data + i + j) += *((__u64 *)extras + j); + } + i += j; + + for (j = 0; j < priv->num_channels; j++) { + ch_stats = &priv->channel[j]->stats; + cdan += ch_stats->cdan; + portal_busy += ch_stats->dequeue_portal_busy; + pull_err += ch_stats->pull_err; + } + + *(data + i++) = portal_busy; + *(data + i++) = pull_err; + *(data + i++) = cdan; +} + +static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, + void *key, void *mask) +{ + int off; + + if (eth_mask->h_proto) { + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); + *(__be16 *)(key + off) = eth_value->h_proto; + *(__be16 *)(mask + off) = eth_mask->h_proto; + } + + if (!is_zero_ether_addr(eth_mask->h_source)) { + off = 
dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA); + ether_addr_copy(key + off, eth_value->h_source); + ether_addr_copy(mask + off, eth_mask->h_source); + } + + if (!is_zero_ether_addr(eth_mask->h_dest)) { + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); + ether_addr_copy(key + off, eth_value->h_dest); + ether_addr_copy(mask + off, eth_mask->h_dest); + } + + return 0; +} + +static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value, + struct ethtool_usrip4_spec *uip_mask, + void *key, void *mask) +{ + int off; + u32 tmp_value, tmp_mask; + + if (uip_mask->tos || uip_mask->ip_ver) + return -EOPNOTSUPP; + + if (uip_mask->ip4src) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); + *(__be32 *)(key + off) = uip_value->ip4src; + *(__be32 *)(mask + off) = uip_mask->ip4src; + } + + if (uip_mask->ip4dst) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); + *(__be32 *)(key + off) = uip_value->ip4dst; + *(__be32 *)(mask + off) = uip_mask->ip4dst; + } + + if (uip_mask->proto) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); + *(u8 *)(key + off) = uip_value->proto; + *(u8 *)(mask + off) = uip_mask->proto; + } + + if (uip_mask->l4_4_bytes) { + tmp_value = be32_to_cpu(uip_value->l4_4_bytes); + tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes); + + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); + *(__be16 *)(key + off) = htons(tmp_value >> 16); + *(__be16 *)(mask + off) = htons(tmp_mask >> 16); + + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); + *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF); + *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF); + } + + /* Only apply the rule for IPv4 frames */ + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); + *(__be16 *)(key + off) = htons(ETH_P_IP); + *(__be16 *)(mask + off) = htons(0xFFFF); + + return 0; +} + +static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value, + struct ethtool_tcpip4_spec *l4_mask, + void *key, void *mask, u8 l4_proto) +{ + int off; + + if (l4_mask->tos) + return -EOPNOTSUPP; + + if (l4_mask->ip4src) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); + *(__be32 *)(key + off) = l4_value->ip4src; + *(__be32 *)(mask + off) = l4_mask->ip4src; + } + + if (l4_mask->ip4dst) { + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); + *(__be32 *)(key + off) = l4_value->ip4dst; + *(__be32 *)(mask + off) = l4_mask->ip4dst; + } + + if (l4_mask->psrc) { + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); + *(__be16 *)(key + off) = l4_value->psrc; + *(__be16 *)(mask + off) = l4_mask->psrc; + } + + if (l4_mask->pdst) { + off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); + *(__be16 *)(key + off) = l4_value->pdst; + *(__be16 *)(mask + off) = l4_mask->pdst; + } + + /* Only apply the rule for IPv4 frames with the specified L4 proto */ + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); + *(__be16 *)(key + off) = htons(ETH_P_IP); + *(__be16 *)(mask + off) = htons(0xFFFF); + + off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); + *(u8 *)(key + off) = l4_proto; + *(u8 *)(mask + off) = 0xFF; + + return 0; +} + +static int prep_ext_rule(struct ethtool_flow_ext *ext_value, + struct ethtool_flow_ext *ext_mask, + void *key, void *mask) +{ + int off; + + if (ext_mask->vlan_etype) + return -EOPNOTSUPP; + + if (ext_mask->vlan_tci) { + off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI); + *(__be16 *)(key + off) = ext_value->vlan_tci; + *(__be16 *)(mask + off) = ext_mask->vlan_tci; + } + + return 0; +} + 
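[Editorial aside, not part of the patch] The prep_*_rule() helpers above all write value/mask bytes into one flat buffer, at the offsets dpaa2_eth_cls_fld_off() derives by walking the dist_fields[] table defined earlier in dpaa2-eth.c. The standalone sketch below restates those field sizes locally and prints the resulting layout, so the key/mask offsets used by the classification code are easy to follow; every name in it is local to the sketch and nothing here is shared with the driver.

/* Standalone illustration: mirrors the dist_fields[] walk done by
 * dpaa2_eth_cls_key_size() and dpaa2_eth_cls_fld_off(). Sizes are
 * copied from the driver's table; compile and run in userspace.
 */
#include <stdio.h>

struct field { const char *name; int size; };

static const struct field fields[] = {
	{ "ETH_DA",       6 },
	{ "ETH_SA",       6 },
	{ "ETH_TYPE",     2 },
	{ "VLAN_TCI",     2 },
	{ "IP_SRC",       4 },
	{ "IP_DST",       4 },
	{ "IP_PROTO",     1 },
	{ "UDP_PORT_SRC", 2 },
	{ "UDP_PORT_DST", 2 },
};

int main(void)
{
	int i, off = 0;

	for (i = 0; i < (int)(sizeof(fields) / sizeof(fields[0])); i++) {
		/* offset of each field inside the key and mask buffers */
		printf("%-12s offset %2d size %d\n",
		       fields[i].name, off, fields[i].size);
		off += fields[i].size;
	}
	/* total key size; do_cls_rule() below allocates twice this,
	 * one copy for the key and one for the mask
	 */
	printf("key size: %d bytes\n", off);
	return 0;
}

With the sizes listed in dist_fields[], this comes to a 29-byte key, and do_cls_rule() below allocates 2 * key_size so the mask immediately follows the key in the DMA-mapped buffer.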
+static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value, + struct ethtool_flow_ext *ext_mask, + void *key, void *mask) +{ + int off; + + if (!is_zero_ether_addr(ext_mask->h_dest)) { + off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); + ether_addr_copy(key + off, ext_value->h_dest); + ether_addr_copy(mask + off, ext_mask->h_dest); + } + + return 0; +} + +static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask) +{ + int err; + + switch (fs->flow_type & 0xFF) { + case ETHER_FLOW: + err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec, + key, mask); + break; + case IP_USER_FLOW: + err = prep_uip_rule(&fs->h_u.usr_ip4_spec, + &fs->m_u.usr_ip4_spec, key, mask); + break; + case TCP_V4_FLOW: + err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec, + key, mask, IPPROTO_TCP); + break; + case UDP_V4_FLOW: + err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec, + key, mask, IPPROTO_UDP); + break; + case SCTP_V4_FLOW: + err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, + &fs->m_u.sctp_ip4_spec, key, mask, + IPPROTO_SCTP); + break; + default: + return -EOPNOTSUPP; + } + + if (err) + return err; + + if (fs->flow_type & FLOW_EXT) { + err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask); + if (err) + return err; + } + + if (fs->flow_type & FLOW_MAC_EXT) { + err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask); + if (err) + return err; + } + + return 0; +} + +static int do_cls_rule(struct net_device *net_dev, + struct ethtool_rx_flow_spec *fs, + bool add) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + struct dpni_rule_cfg rule_cfg = { 0 }; + struct dpni_fs_action_cfg fs_act = { 0 }; + dma_addr_t key_iova; + void *key_buf; + int err; + + if (fs->ring_cookie != RX_CLS_FLOW_DISC && + fs->ring_cookie >= dpaa2_eth_queue_count(priv)) + return -EINVAL; + + rule_cfg.key_size = dpaa2_eth_cls_key_size(); + + /* allocate twice the key size, for the actual key and for mask */ + key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL); + if (!key_buf) + return -ENOMEM; + + /* Fill the key and mask memory areas */ + err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size); + if (err) + goto free_mem; + + key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, key_iova)) { + err = -ENOMEM; + goto free_mem; + } + + rule_cfg.key_iova = key_iova; + rule_cfg.mask_iova = key_iova + rule_cfg.key_size; + + if (add) { + if (fs->ring_cookie == RX_CLS_FLOW_DISC) + fs_act.options |= DPNI_FS_OPT_DISCARD; + else + fs_act.flow_id = fs->ring_cookie; + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, + fs->location, &rule_cfg, &fs_act); + } else { + err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, + &rule_cfg); + } + + dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE); + +free_mem: + kfree(key_buf); + + return err; +} + +static int update_cls_rule(struct net_device *net_dev, + struct ethtool_rx_flow_spec *new_fs, + int location) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpaa2_eth_cls_rule *rule; + int err = -EINVAL; + + if (!priv->rx_cls_enabled) + return -EOPNOTSUPP; + + if (location >= dpaa2_eth_fs_count(priv)) + return -EINVAL; + + rule = &priv->cls_rules[location]; + + /* If a rule is present at the specified location, delete it. 
*/ + if (rule->in_use) { + err = do_cls_rule(net_dev, &rule->fs, false); + if (err) + return err; + + rule->in_use = 0; + } + + /* If no new entry to add, return here */ + if (!new_fs) + return err; + + err = do_cls_rule(net_dev, new_fs, true); + if (err) + return err; + + rule->in_use = 1; + rule->fs = *new_fs; + + return 0; +} + +static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int max_rules = dpaa2_eth_fs_count(priv); + int i, j = 0; + + switch (rxnfc->cmd) { + case ETHTOOL_GRXFH: + /* we purposely ignore cmd->flow_type for now, because the + * classifier only supports a single set of fields for all + * protocols + */ + rxnfc->data = priv->rx_hash_fields; + break; + case ETHTOOL_GRXRINGS: + rxnfc->data = dpaa2_eth_queue_count(priv); + break; + case ETHTOOL_GRXCLSRLCNT: + rxnfc->rule_cnt = 0; + for (i = 0; i < max_rules; i++) + if (priv->cls_rules[i].in_use) + rxnfc->rule_cnt++; + rxnfc->data = max_rules; + break; + case ETHTOOL_GRXCLSRULE: + if (rxnfc->fs.location >= max_rules) + return -EINVAL; + if (!priv->cls_rules[rxnfc->fs.location].in_use) + return -EINVAL; + rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs; + break; + case ETHTOOL_GRXCLSRLALL: + for (i = 0; i < max_rules; i++) { + if (!priv->cls_rules[i].in_use) + continue; + if (j == rxnfc->rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = i; + } + rxnfc->rule_cnt = j; + rxnfc->data = max_rules; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *rxnfc) +{ + int err = 0; + + switch (rxnfc->cmd) { + case ETHTOOL_SRXFH: + if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data) + return -EOPNOTSUPP; + err = dpaa2_eth_set_hash(net_dev, rxnfc->data); + break; + case ETHTOOL_SRXCLSRLINS: + err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location); + break; + case ETHTOOL_SRXCLSRLDEL: + err = update_cls_rule(net_dev, NULL, rxnfc->fs.location); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +int dpaa2_phc_index = -1; +EXPORT_SYMBOL(dpaa2_phc_index); + +static int dpaa2_eth_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->phc_index = dpaa2_phc_index; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; +} + +const struct ethtool_ops dpaa2_ethtool_ops = { + .get_drvinfo = dpaa2_eth_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = dpaa2_eth_get_link_ksettings, + .set_link_ksettings = dpaa2_eth_set_link_ksettings, + .get_sset_count = dpaa2_eth_get_sset_count, + .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, + .get_strings = dpaa2_eth_get_strings, + .get_rxnfc = dpaa2_eth_get_rxnfc, + .set_rxnfc = dpaa2_eth_set_rxnfc, + .get_ts_info = dpaa2_eth_get_ts_info, +}; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c new file mode 100644 index 000000000000..84b942b1eccc --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016-2018 NXP + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/fsl/mc.h> + +#include "dpaa2-ptp.h" + +struct ptp_dpaa2_priv { + struct fsl_mc_device *ptp_mc_dev; + struct ptp_clock *clock; + struct ptp_clock_info caps; + u32 freq_comp; +}; + +/* PTP clock operations */ +static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ptp_dpaa2_priv *ptp_dpaa2 = + container_of(ptp, struct ptp_dpaa2_priv, caps); + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; + struct device *dev = &mc_dev->dev; + u64 adj; + u32 diff, tmr_add; + int neg_adj = 0; + int err = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + tmr_add = ptp_dpaa2->freq_comp; + adj = tmr_add; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff; + + err = dprtc_set_freq_compensation(mc_dev->mc_io, 0, + mc_dev->mc_handle, tmr_add); + if (err) + dev_err(dev, "dprtc_set_freq_compensation err %d\n", err); + return err; +} + +static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ptp_dpaa2_priv *ptp_dpaa2 = + container_of(ptp, struct ptp_dpaa2_priv, caps); + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; + struct device *dev = &mc_dev->dev; + s64 now; + int err = 0; + + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now); + if (err) { + dev_err(dev, "dprtc_get_time err %d\n", err); + return err; + } + + now += delta; + + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now); + if (err) + dev_err(dev, "dprtc_set_time err %d\n", err); + return err; +} + +static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct ptp_dpaa2_priv *ptp_dpaa2 = + container_of(ptp, struct ptp_dpaa2_priv, caps); + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; + struct device *dev = &mc_dev->dev; + u64 ns; + u32 remainder; + int err = 0; + + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns); + if (err) { + dev_err(dev, "dprtc_get_time err %d\n", err); + return err; + } + + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); + ts->tv_nsec = remainder; + return err; +} + +static int ptp_dpaa2_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ptp_dpaa2_priv *ptp_dpaa2 = + container_of(ptp, struct ptp_dpaa2_priv, caps); + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; + struct device *dev = &mc_dev->dev; + u64 ns; + int err = 0; + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns); + if (err) + dev_err(dev, "dprtc_set_time err %d\n", err); + return err; +} + +static const struct ptp_clock_info ptp_dpaa2_caps = { + .owner = THIS_MODULE, + .name = "DPAA2 PTP Clock", + .max_adj = 512000, + .n_alarm = 2, + .n_ext_ts = 2, + .n_per_out = 3, + .n_pins = 0, + .pps = 1, + .adjfreq = ptp_dpaa2_adjfreq, + .adjtime = ptp_dpaa2_adjtime, + .gettime64 = ptp_dpaa2_gettime, + .settime64 = ptp_dpaa2_settime, +}; + +static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) +{ + struct device *dev = &mc_dev->dev; + struct ptp_dpaa2_priv *ptp_dpaa2; + u32 tmr_add = 0; + int err; + + ptp_dpaa2 = devm_kzalloc(dev, sizeof(*ptp_dpaa2), GFP_KERNEL); + if (!ptp_dpaa2) + return -ENOMEM; + + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); + if (err) { + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); + goto err_exit; + } + + err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, + &mc_dev->mc_handle); + if 
(err) { + dev_err(dev, "dprtc_open err %d\n", err); + goto err_free_mcp; + } + + ptp_dpaa2->ptp_mc_dev = mc_dev; + + err = dprtc_get_freq_compensation(mc_dev->mc_io, 0, + mc_dev->mc_handle, &tmr_add); + if (err) { + dev_err(dev, "dprtc_get_freq_compensation err %d\n", err); + goto err_close; + } + + ptp_dpaa2->freq_comp = tmr_add; + ptp_dpaa2->caps = ptp_dpaa2_caps; + + ptp_dpaa2->clock = ptp_clock_register(&ptp_dpaa2->caps, dev); + if (IS_ERR(ptp_dpaa2->clock)) { + err = PTR_ERR(ptp_dpaa2->clock); + goto err_close; + } + + dpaa2_phc_index = ptp_clock_index(ptp_dpaa2->clock); + + dev_set_drvdata(dev, ptp_dpaa2); + + return 0; + +err_close: + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); +err_free_mcp: + fsl_mc_portal_free(mc_dev->mc_io); +err_exit: + return err; +} + +static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev) +{ + struct ptp_dpaa2_priv *ptp_dpaa2; + struct device *dev = &mc_dev->dev; + + ptp_dpaa2 = dev_get_drvdata(dev); + ptp_clock_unregister(ptp_dpaa2->clock); + + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); + fsl_mc_portal_free(mc_dev->mc_io); + + return 0; +} + +static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dprtc", + }, + {} +}; +MODULE_DEVICE_TABLE(fslmc, dpaa2_ptp_match_id_table); + +static struct fsl_mc_driver dpaa2_ptp_drv = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = dpaa2_ptp_probe, + .remove = dpaa2_ptp_remove, + .match_id_table = dpaa2_ptp_match_id_table, +}; + +module_fsl_mc_driver(dpaa2_ptp_drv); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DPAA2 PTP Clock Driver"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h new file mode 100644 index 000000000000..ff2e177395d4 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018 NXP + */ + +#ifndef __RTC_H +#define __RTC_H + +#include "dprtc.h" +#include "dprtc-cmd.h" + +extern int dpaa2_phc_index; + +#endif diff --git a/drivers/net/ethernet/freescale/dpaa2/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h new file mode 100644 index 000000000000..6de613b13e4d --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h @@ -0,0 +1,480 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2013-2015 Freescale Semiconductor Inc. + */ +#ifndef __FSL_DPKG_H_ +#define __FSL_DPKG_H_ + +#include <linux/types.h> + +/* Data Path Key Generator API + * Contains initialization APIs and runtime APIs for the Key Generator + */ + +/** Key Generator properties */ + +/** + * Number of masks per key extraction + */ +#define DPKG_NUM_OF_MASKS 4 +/** + * Number of extractions per key profile + */ +#define DPKG_MAX_NUM_OF_EXTRACTS 10 + +/** + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field + * @DPKG_FULL_FIELD: Extract a full field + */ +enum dpkg_extract_from_hdr_type { + DPKG_FROM_HDR = 0, + DPKG_FROM_FIELD = 1, + DPKG_FULL_FIELD = 2 +}; + +/** + * enum dpkg_extract_type - Enumeration for selecting extraction type + * @DPKG_EXTRACT_FROM_HDR: Extract from the header + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; + * e.g. 
can be used to extract header existence; + * please refer to 'Parse Result definition' section in the parser BG + */ +enum dpkg_extract_type { + DPKG_EXTRACT_FROM_HDR = 0, + DPKG_EXTRACT_FROM_DATA = 1, + DPKG_EXTRACT_FROM_PARSE = 3 +}; + +/** + * struct dpkg_mask - A structure for defining a single extraction mask + * @mask: Byte mask for the extracted content + * @offset: Offset within the extracted content + */ +struct dpkg_mask { + u8 mask; + u8 offset; +}; + +/* Protocol fields */ + +/* Ethernet fields */ +#define NH_FLD_ETH_DA BIT(0) +#define NH_FLD_ETH_SA BIT(1) +#define NH_FLD_ETH_LENGTH BIT(2) +#define NH_FLD_ETH_TYPE BIT(3) +#define NH_FLD_ETH_FINAL_CKSUM BIT(4) +#define NH_FLD_ETH_PADDING BIT(5) +#define NH_FLD_ETH_ALL_FIELDS (BIT(6) - 1) + +/* VLAN fields */ +#define NH_FLD_VLAN_VPRI BIT(0) +#define NH_FLD_VLAN_CFI BIT(1) +#define NH_FLD_VLAN_VID BIT(2) +#define NH_FLD_VLAN_LENGTH BIT(3) +#define NH_FLD_VLAN_TYPE BIT(4) +#define NH_FLD_VLAN_ALL_FIELDS (BIT(5) - 1) + +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ + NH_FLD_VLAN_CFI | \ + NH_FLD_VLAN_VID) + +/* IP (generic) fields */ +#define NH_FLD_IP_VER BIT(0) +#define NH_FLD_IP_DSCP BIT(2) +#define NH_FLD_IP_ECN BIT(3) +#define NH_FLD_IP_PROTO BIT(4) +#define NH_FLD_IP_SRC BIT(5) +#define NH_FLD_IP_DST BIT(6) +#define NH_FLD_IP_TOS_TC BIT(7) +#define NH_FLD_IP_ID BIT(8) +#define NH_FLD_IP_ALL_FIELDS (BIT(9) - 1) + +/* IPV4 fields */ +#define NH_FLD_IPV4_VER BIT(0) +#define NH_FLD_IPV4_HDR_LEN BIT(1) +#define NH_FLD_IPV4_TOS BIT(2) +#define NH_FLD_IPV4_TOTAL_LEN BIT(3) +#define NH_FLD_IPV4_ID BIT(4) +#define NH_FLD_IPV4_FLAG_D BIT(5) +#define NH_FLD_IPV4_FLAG_M BIT(6) +#define NH_FLD_IPV4_OFFSET BIT(7) +#define NH_FLD_IPV4_TTL BIT(8) +#define NH_FLD_IPV4_PROTO BIT(9) +#define NH_FLD_IPV4_CKSUM BIT(10) +#define NH_FLD_IPV4_SRC_IP BIT(11) +#define NH_FLD_IPV4_DST_IP BIT(12) +#define NH_FLD_IPV4_OPTS BIT(13) +#define NH_FLD_IPV4_OPTS_COUNT BIT(14) +#define NH_FLD_IPV4_ALL_FIELDS (BIT(15) - 1) + +/* IPV6 fields */ +#define NH_FLD_IPV6_VER BIT(0) +#define NH_FLD_IPV6_TC BIT(1) +#define NH_FLD_IPV6_SRC_IP BIT(2) +#define NH_FLD_IPV6_DST_IP BIT(3) +#define NH_FLD_IPV6_NEXT_HDR BIT(4) +#define NH_FLD_IPV6_FL BIT(5) +#define NH_FLD_IPV6_HOP_LIMIT BIT(6) +#define NH_FLD_IPV6_ID BIT(7) +#define NH_FLD_IPV6_ALL_FIELDS (BIT(8) - 1) + +/* ICMP fields */ +#define NH_FLD_ICMP_TYPE BIT(0) +#define NH_FLD_ICMP_CODE BIT(1) +#define NH_FLD_ICMP_CKSUM BIT(2) +#define NH_FLD_ICMP_ID BIT(3) +#define NH_FLD_ICMP_SQ_NUM BIT(4) +#define NH_FLD_ICMP_ALL_FIELDS (BIT(5) - 1) + +/* IGMP fields */ +#define NH_FLD_IGMP_VERSION BIT(0) +#define NH_FLD_IGMP_TYPE BIT(1) +#define NH_FLD_IGMP_CKSUM BIT(2) +#define NH_FLD_IGMP_DATA BIT(3) +#define NH_FLD_IGMP_ALL_FIELDS (BIT(4) - 1) + +/* TCP fields */ +#define NH_FLD_TCP_PORT_SRC BIT(0) +#define NH_FLD_TCP_PORT_DST BIT(1) +#define NH_FLD_TCP_SEQ BIT(2) +#define NH_FLD_TCP_ACK BIT(3) +#define NH_FLD_TCP_OFFSET BIT(4) +#define NH_FLD_TCP_FLAGS BIT(5) +#define NH_FLD_TCP_WINDOW BIT(6) +#define NH_FLD_TCP_CKSUM BIT(7) +#define NH_FLD_TCP_URGPTR BIT(8) +#define NH_FLD_TCP_OPTS BIT(9) +#define NH_FLD_TCP_OPTS_COUNT BIT(10) +#define NH_FLD_TCP_ALL_FIELDS (BIT(11) - 1) + +/* UDP fields */ +#define NH_FLD_UDP_PORT_SRC BIT(0) +#define NH_FLD_UDP_PORT_DST BIT(1) +#define NH_FLD_UDP_LEN BIT(2) +#define NH_FLD_UDP_CKSUM BIT(3) +#define NH_FLD_UDP_ALL_FIELDS (BIT(4) - 1) + +/* UDP-lite fields */ +#define NH_FLD_UDP_LITE_PORT_SRC BIT(0) +#define NH_FLD_UDP_LITE_PORT_DST BIT(1) +#define NH_FLD_UDP_LITE_ALL_FIELDS (BIT(2) - 1) + +/* 
UDP-encap-ESP fields */ +#define NH_FLD_UDP_ENC_ESP_PORT_SRC BIT(0) +#define NH_FLD_UDP_ENC_ESP_PORT_DST BIT(1) +#define NH_FLD_UDP_ENC_ESP_LEN BIT(2) +#define NH_FLD_UDP_ENC_ESP_CKSUM BIT(3) +#define NH_FLD_UDP_ENC_ESP_SPI BIT(4) +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM BIT(5) +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS (BIT(6) - 1) + +/* SCTP fields */ +#define NH_FLD_SCTP_PORT_SRC BIT(0) +#define NH_FLD_SCTP_PORT_DST BIT(1) +#define NH_FLD_SCTP_VER_TAG BIT(2) +#define NH_FLD_SCTP_CKSUM BIT(3) +#define NH_FLD_SCTP_ALL_FIELDS (BIT(4) - 1) + +/* DCCP fields */ +#define NH_FLD_DCCP_PORT_SRC BIT(0) +#define NH_FLD_DCCP_PORT_DST BIT(1) +#define NH_FLD_DCCP_ALL_FIELDS (BIT(2) - 1) + +/* IPHC fields */ +#define NH_FLD_IPHC_CID BIT(0) +#define NH_FLD_IPHC_CID_TYPE BIT(1) +#define NH_FLD_IPHC_HCINDEX BIT(2) +#define NH_FLD_IPHC_GEN BIT(3) +#define NH_FLD_IPHC_D_BIT BIT(4) +#define NH_FLD_IPHC_ALL_FIELDS (BIT(5) - 1) + +/* SCTP fields */ +#define NH_FLD_SCTP_CHUNK_DATA_TYPE BIT(0) +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS BIT(1) +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH BIT(2) +#define NH_FLD_SCTP_CHUNK_DATA_TSN BIT(3) +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID BIT(4) +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN BIT(5) +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID BIT(6) +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED BIT(7) +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING BIT(8) +#define NH_FLD_SCTP_CHUNK_DATA_END BIT(9) +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS (BIT(10) - 1) + +/* L2TPV2 fields */ +#define NH_FLD_L2TPV2_TYPE_BIT BIT(0) +#define NH_FLD_L2TPV2_LENGTH_BIT BIT(1) +#define NH_FLD_L2TPV2_SEQUENCE_BIT BIT(2) +#define NH_FLD_L2TPV2_OFFSET_BIT BIT(3) +#define NH_FLD_L2TPV2_PRIORITY_BIT BIT(4) +#define NH_FLD_L2TPV2_VERSION BIT(5) +#define NH_FLD_L2TPV2_LEN BIT(6) +#define NH_FLD_L2TPV2_TUNNEL_ID BIT(7) +#define NH_FLD_L2TPV2_SESSION_ID BIT(8) +#define NH_FLD_L2TPV2_NS BIT(9) +#define NH_FLD_L2TPV2_NR BIT(10) +#define NH_FLD_L2TPV2_OFFSET_SIZE BIT(11) +#define NH_FLD_L2TPV2_FIRST_BYTE BIT(12) +#define NH_FLD_L2TPV2_ALL_FIELDS (BIT(13) - 1) + +/* L2TPV3 fields */ +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT BIT(0) +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT BIT(1) +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT BIT(2) +#define NH_FLD_L2TPV3_CTRL_VERSION BIT(3) +#define NH_FLD_L2TPV3_CTRL_LENGTH BIT(4) +#define NH_FLD_L2TPV3_CTRL_CONTROL BIT(5) +#define NH_FLD_L2TPV3_CTRL_SENT BIT(6) +#define NH_FLD_L2TPV3_CTRL_RECV BIT(7) +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE BIT(8) +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS (BIT(9) - 1) + +#define NH_FLD_L2TPV3_SESS_TYPE_BIT BIT(0) +#define NH_FLD_L2TPV3_SESS_VERSION BIT(1) +#define NH_FLD_L2TPV3_SESS_ID BIT(2) +#define NH_FLD_L2TPV3_SESS_COOKIE BIT(3) +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS (BIT(4) - 1) + +/* PPP fields */ +#define NH_FLD_PPP_PID BIT(0) +#define NH_FLD_PPP_COMPRESSED BIT(1) +#define NH_FLD_PPP_ALL_FIELDS (BIT(2) - 1) + +/* PPPoE fields */ +#define NH_FLD_PPPOE_VER BIT(0) +#define NH_FLD_PPPOE_TYPE BIT(1) +#define NH_FLD_PPPOE_CODE BIT(2) +#define NH_FLD_PPPOE_SID BIT(3) +#define NH_FLD_PPPOE_LEN BIT(4) +#define NH_FLD_PPPOE_SESSION BIT(5) +#define NH_FLD_PPPOE_PID BIT(6) +#define NH_FLD_PPPOE_ALL_FIELDS (BIT(7) - 1) + +/* PPP-Mux fields */ +#define NH_FLD_PPPMUX_PID BIT(0) +#define NH_FLD_PPPMUX_CKSUM BIT(1) +#define NH_FLD_PPPMUX_COMPRESSED BIT(2) +#define NH_FLD_PPPMUX_ALL_FIELDS (BIT(3) - 1) + +/* PPP-Mux sub-frame fields */ +#define NH_FLD_PPPMUX_SUBFRM_PFF BIT(0) +#define NH_FLD_PPPMUX_SUBFRM_LXT BIT(1) +#define NH_FLD_PPPMUX_SUBFRM_LEN BIT(2) +#define NH_FLD_PPPMUX_SUBFRM_PID BIT(3) 
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID BIT(4) +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS (BIT(5) - 1) + +/* LLC fields */ +#define NH_FLD_LLC_DSAP BIT(0) +#define NH_FLD_LLC_SSAP BIT(1) +#define NH_FLD_LLC_CTRL BIT(2) +#define NH_FLD_LLC_ALL_FIELDS (BIT(3) - 1) + +/* NLPID fields */ +#define NH_FLD_NLPID_NLPID BIT(0) +#define NH_FLD_NLPID_ALL_FIELDS (BIT(1) - 1) + +/* SNAP fields */ +#define NH_FLD_SNAP_OUI BIT(0) +#define NH_FLD_SNAP_PID BIT(1) +#define NH_FLD_SNAP_ALL_FIELDS (BIT(2) - 1) + +/* LLC SNAP fields */ +#define NH_FLD_LLC_SNAP_TYPE BIT(0) +#define NH_FLD_LLC_SNAP_ALL_FIELDS (BIT(1) - 1) + +/* ARP fields */ +#define NH_FLD_ARP_HTYPE BIT(0) +#define NH_FLD_ARP_PTYPE BIT(1) +#define NH_FLD_ARP_HLEN BIT(2) +#define NH_FLD_ARP_PLEN BIT(3) +#define NH_FLD_ARP_OPER BIT(4) +#define NH_FLD_ARP_SHA BIT(5) +#define NH_FLD_ARP_SPA BIT(6) +#define NH_FLD_ARP_THA BIT(7) +#define NH_FLD_ARP_TPA BIT(8) +#define NH_FLD_ARP_ALL_FIELDS (BIT(9) - 1) + +/* RFC2684 fields */ +#define NH_FLD_RFC2684_LLC BIT(0) +#define NH_FLD_RFC2684_NLPID BIT(1) +#define NH_FLD_RFC2684_OUI BIT(2) +#define NH_FLD_RFC2684_PID BIT(3) +#define NH_FLD_RFC2684_VPN_OUI BIT(4) +#define NH_FLD_RFC2684_VPN_IDX BIT(5) +#define NH_FLD_RFC2684_ALL_FIELDS (BIT(6) - 1) + +/* User defined fields */ +#define NH_FLD_USER_DEFINED_SRCPORT BIT(0) +#define NH_FLD_USER_DEFINED_PCDID BIT(1) +#define NH_FLD_USER_DEFINED_ALL_FIELDS (BIT(2) - 1) + +/* Payload fields */ +#define NH_FLD_PAYLOAD_BUFFER BIT(0) +#define NH_FLD_PAYLOAD_SIZE BIT(1) +#define NH_FLD_MAX_FRM_SIZE BIT(2) +#define NH_FLD_MIN_FRM_SIZE BIT(3) +#define NH_FLD_PAYLOAD_TYPE BIT(4) +#define NH_FLD_FRAME_SIZE BIT(5) +#define NH_FLD_PAYLOAD_ALL_FIELDS (BIT(6) - 1) + +/* GRE fields */ +#define NH_FLD_GRE_TYPE BIT(0) +#define NH_FLD_GRE_ALL_FIELDS (BIT(1) - 1) + +/* MINENCAP fields */ +#define NH_FLD_MINENCAP_SRC_IP BIT(0) +#define NH_FLD_MINENCAP_DST_IP BIT(1) +#define NH_FLD_MINENCAP_TYPE BIT(2) +#define NH_FLD_MINENCAP_ALL_FIELDS (BIT(3) - 1) + +/* IPSEC AH fields */ +#define NH_FLD_IPSEC_AH_SPI BIT(0) +#define NH_FLD_IPSEC_AH_NH BIT(1) +#define NH_FLD_IPSEC_AH_ALL_FIELDS (BIT(2) - 1) + +/* IPSEC ESP fields */ +#define NH_FLD_IPSEC_ESP_SPI BIT(0) +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM BIT(1) +#define NH_FLD_IPSEC_ESP_ALL_FIELDS (BIT(2) - 1) + +/* MPLS fields */ +#define NH_FLD_MPLS_LABEL_STACK BIT(0) +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS (BIT(1) - 1) + +/* MACSEC fields */ +#define NH_FLD_MACSEC_SECTAG BIT(0) +#define NH_FLD_MACSEC_ALL_FIELDS (BIT(1) - 1) + +/* GTP fields */ +#define NH_FLD_GTP_TEID BIT(0) + +/* Supported protocols */ +enum net_prot { + NET_PROT_NONE = 0, + NET_PROT_PAYLOAD, + NET_PROT_ETH, + NET_PROT_VLAN, + NET_PROT_IPV4, + NET_PROT_IPV6, + NET_PROT_IP, + NET_PROT_TCP, + NET_PROT_UDP, + NET_PROT_UDP_LITE, + NET_PROT_IPHC, + NET_PROT_SCTP, + NET_PROT_SCTP_CHUNK_DATA, + NET_PROT_PPPOE, + NET_PROT_PPP, + NET_PROT_PPPMUX, + NET_PROT_PPPMUX_SUBFRM, + NET_PROT_L2TPV2, + NET_PROT_L2TPV3_CTRL, + NET_PROT_L2TPV3_SESS, + NET_PROT_LLC, + NET_PROT_LLC_SNAP, + NET_PROT_NLPID, + NET_PROT_SNAP, + NET_PROT_MPLS, + NET_PROT_IPSEC_AH, + NET_PROT_IPSEC_ESP, + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ + NET_PROT_MACSEC, + NET_PROT_GRE, + NET_PROT_MINENCAP, + NET_PROT_DCCP, + NET_PROT_ICMP, + NET_PROT_IGMP, + NET_PROT_ARP, + NET_PROT_CAPWAP_DATA, + NET_PROT_CAPWAP_CTRL, + NET_PROT_RFC2684, + NET_PROT_ICMPV6, + NET_PROT_FCOE, + NET_PROT_FIP, + NET_PROT_ISCSI, + NET_PROT_GTP, + NET_PROT_USER_DEFINED_L2, + NET_PROT_USER_DEFINED_L3, + NET_PROT_USER_DEFINED_L4, + 
NET_PROT_USER_DEFINED_L5, + NET_PROT_USER_DEFINED_SHIM1, + NET_PROT_USER_DEFINED_SHIM2, + + NET_PROT_DUMMY_LAST +}; + +/** + * struct dpkg_extract - A structure for defining a single extraction + * @type: Determines how the union below is interpreted: + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; + * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' + * @extract: Selects extraction method + * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR' + * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA' + * @extract.from_parse: Used when 'type = DPKG_EXTRACT_FROM_PARSE' + * @extract.from_hdr.prot: Any of the supported headers + * @extract.from_hdr.type: Defines the type of header extraction: + * DPKG_FROM_HDR: use size & offset below; + * DPKG_FROM_FIELD: use field, size and offset below; + * DPKG_FULL_FIELD: use field below + * @extract.from_hdr.field: One of the supported fields (NH_FLD_) + * @extract.from_hdr.size: Size in bytes + * @extract.from_hdr.offset: Byte offset + * @extract.from_hdr.hdr_index: Clear for cases not listed below; + * Used for protocols that may have more than a single + * header, 0 indicates an outer header; + * Supported protocols (possible values): + * NET_PROT_VLAN (0, HDR_INDEX_LAST); + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); + * NET_PROT_IP(0, HDR_INDEX_LAST); + * NET_PROT_IPv4(0, HDR_INDEX_LAST); + * NET_PROT_IPv6(0, HDR_INDEX_LAST); + * @extract.from_data.size: Size in bytes + * @extract.from_data.offset: Byte offset + * @extract.from_parse.size: Size in bytes + * @extract.from_parse.offset: Byte offset + * @num_of_byte_masks: Defines the number of valid entries in the array below; + * This is also the number of bytes to be used as masks + * @masks: Masks parameters + */ +struct dpkg_extract { + enum dpkg_extract_type type; + union { + struct { + enum net_prot prot; + enum dpkg_extract_from_hdr_type type; + u32 field; + u8 size; + u8 offset; + u8 hdr_index; + } from_hdr; + struct { + u8 size; + u8 offset; + } from_data; + struct { + u8 size; + u8 offset; + } from_parse; + } extract; + + u8 num_of_byte_masks; + struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; +}; + +/** + * struct dpkg_profile_cfg - A structure for defining a full Key Generation + * profile (rule) + * @num_extracts: Defines the number of valid entries in the array below + * @extracts: Array of required extractions + */ +struct dpkg_profile_cfg { + u8 num_extracts; + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; +}; + +#endif /* __FSL_DPKG_H_ */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h new file mode 100644 index 000000000000..7b44d7d9b19a --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016 NXP + */ +#ifndef _FSL_DPNI_CMD_H +#define _FSL_DPNI_CMD_H + +#include "dpni.h" + +/* DPNI Version */ +#define DPNI_VER_MAJOR 7 +#define DPNI_VER_MINOR 0 +#define DPNI_CMD_BASE_VERSION 1 +#define DPNI_CMD_ID_OFFSET 4 + +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) + +#define DPNI_CMDID_OPEN DPNI_CMD(0x801) +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800) +#define DPNI_CMDID_CREATE DPNI_CMD(0x901) +#define DPNI_CMDID_DESTROY DPNI_CMD(0x900) +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01) + +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002) +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003) +#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004) +#define DPNI_CMDID_RESET DPNI_CMD(0x005) +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006) + +#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010) +#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011) +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012) +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013) +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014) +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015) +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) + +#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200) +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) + +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212) +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215) +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216) +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217) +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A) +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B) + +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220) +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221) +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222) +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223) +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224) +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225) +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226) +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227) +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228) + +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235) + +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244) +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245) +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246) + +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D) +#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F) +#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260) +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261) +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262) + +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263) + +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264) +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265) + +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266) +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267) +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268) +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269) +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A) +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B) +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C) + +#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273) +#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPNI_MASK(field) \ + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \ + DPNI_##field##_SHIFT) + +#define dpni_set_field(var, field, val) \ + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field))) +#define dpni_get_field(var, field) \ + (((var) & 
DPNI_MASK(field)) >> DPNI_##field##_SHIFT) + +struct dpni_cmd_open { + __le32 dpni_id; +}; + +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) +struct dpni_cmd_set_pools { + /* cmd word 0 */ + u8 num_dpbp; + u8 backup_pool_mask; + __le16 pad; + /* cmd word 0..4 */ + __le32 dpbp_id[DPNI_MAX_DPBP]; + /* cmd word 4..6 */ + __le16 buffer_size[DPNI_MAX_DPBP]; +}; + +/* The enable indication is always the least significant bit */ +#define DPNI_ENABLE_SHIFT 0 +#define DPNI_ENABLE_SIZE 1 + +struct dpni_rsp_is_enabled { + u8 enabled; +}; + +struct dpni_rsp_get_irq { + /* response word 0 */ + __le32 irq_val; + __le32 pad; + /* response word 1 */ + __le64 irq_addr; + /* response word 2 */ + __le32 irq_num; + __le32 type; +}; + +struct dpni_cmd_set_irq_enable { + u8 enable; + u8 pad[3]; + u8 irq_index; +}; + +struct dpni_cmd_get_irq_enable { + __le32 pad; + u8 irq_index; +}; + +struct dpni_rsp_get_irq_enable { + u8 enabled; +}; + +struct dpni_cmd_set_irq_mask { + __le32 mask; + u8 irq_index; +}; + +struct dpni_cmd_get_irq_mask { + __le32 pad; + u8 irq_index; +}; + +struct dpni_rsp_get_irq_mask { + __le32 mask; +}; + +struct dpni_cmd_get_irq_status { + __le32 status; + u8 irq_index; +}; + +struct dpni_rsp_get_irq_status { + __le32 status; +}; + +struct dpni_cmd_clear_irq_status { + __le32 status; + u8 irq_index; +}; + +struct dpni_rsp_get_attr { + /* response word 0 */ + __le32 options; + u8 num_queues; + u8 num_tcs; + u8 mac_filter_entries; + u8 pad0; + /* response word 1 */ + u8 vlan_filter_entries; + u8 pad1; + u8 qos_entries; + u8 pad2; + __le16 fs_entries; + __le16 pad3; + /* response word 2 */ + u8 qos_key_size; + u8 fs_key_size; + __le16 wriop_version; +}; + +#define DPNI_ERROR_ACTION_SHIFT 0 +#define DPNI_ERROR_ACTION_SIZE 4 +#define DPNI_FRAME_ANN_SHIFT 4 +#define DPNI_FRAME_ANN_SIZE 1 + +struct dpni_cmd_set_errors_behavior { + __le32 errors; + /* from least significant bit: error_action:4, set_frame_annotation:1 */ + u8 flags; +}; + +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation + * buffer layouts, but they all share the same parameters. + * If one of the functions changes, below structure needs to be split. 
+ */ + +#define DPNI_PASS_TS_SHIFT 0 +#define DPNI_PASS_TS_SIZE 1 +#define DPNI_PASS_PR_SHIFT 1 +#define DPNI_PASS_PR_SIZE 1 +#define DPNI_PASS_FS_SHIFT 2 +#define DPNI_PASS_FS_SIZE 1 + +struct dpni_cmd_get_buffer_layout { + u8 qtype; +}; + +struct dpni_rsp_get_buffer_layout { + /* response word 0 */ + u8 pad0[6]; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + u8 flags; + u8 pad1; + /* response word 1 */ + __le16 private_data_size; + __le16 data_align; + __le16 head_room; + __le16 tail_room; +}; + +struct dpni_cmd_set_buffer_layout { + /* cmd word 0 */ + u8 qtype; + u8 pad0[3]; + __le16 options; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + u8 flags; + u8 pad1; + /* cmd word 1 */ + __le16 private_data_size; + __le16 data_align; + __le16 head_room; + __le16 tail_room; +}; + +struct dpni_cmd_set_offload { + u8 pad[3]; + u8 dpni_offload; + __le32 config; +}; + +struct dpni_cmd_get_offload { + u8 pad[3]; + u8 dpni_offload; +}; + +struct dpni_rsp_get_offload { + __le32 pad; + __le32 config; +}; + +struct dpni_cmd_get_qdid { + u8 qtype; +}; + +struct dpni_rsp_get_qdid { + __le16 qdid; +}; + +struct dpni_rsp_get_tx_data_offset { + __le16 data_offset; +}; + +struct dpni_cmd_get_statistics { + u8 page_number; +}; + +struct dpni_rsp_get_statistics { + __le64 counter[DPNI_STATISTICS_CNT]; +}; + +struct dpni_cmd_set_link_cfg { + /* cmd word 0 */ + __le64 pad0; + /* cmd word 1 */ + __le32 rate; + __le32 pad1; + /* cmd word 2 */ + __le64 options; +}; + +#define DPNI_LINK_STATE_SHIFT 0 +#define DPNI_LINK_STATE_SIZE 1 + +struct dpni_rsp_get_link_state { + /* response word 0 */ + __le32 pad0; + /* from LSB: up:1 */ + u8 flags; + u8 pad1[3]; + /* response word 1 */ + __le32 rate; + __le32 pad2; + /* response word 2 */ + __le64 options; +}; + +struct dpni_cmd_set_max_frame_length { + __le16 max_frame_length; +}; + +struct dpni_rsp_get_max_frame_length { + __le16 max_frame_length; +}; + +struct dpni_cmd_set_multicast_promisc { + u8 enable; +}; + +struct dpni_rsp_get_multicast_promisc { + u8 enabled; +}; + +struct dpni_cmd_set_unicast_promisc { + u8 enable; +}; + +struct dpni_rsp_get_unicast_promisc { + u8 enabled; +}; + +struct dpni_cmd_set_primary_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_rsp_get_primary_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_rsp_get_port_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_cmd_add_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_cmd_remove_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +#define DPNI_UNICAST_FILTERS_SHIFT 0 +#define DPNI_UNICAST_FILTERS_SIZE 1 +#define DPNI_MULTICAST_FILTERS_SHIFT 1 +#define DPNI_MULTICAST_FILTERS_SIZE 1 + +struct dpni_cmd_clear_mac_filters { + /* from LSB: unicast:1, multicast:1 */ + u8 flags; +}; + +#define DPNI_DIST_MODE_SHIFT 0 +#define DPNI_DIST_MODE_SIZE 4 +#define DPNI_MISS_ACTION_SHIFT 4 +#define DPNI_MISS_ACTION_SIZE 4 + +struct dpni_cmd_set_rx_tc_dist { + /* cmd word 0 */ + __le16 dist_size; + u8 tc_id; + /* from LSB: dist_mode:4, miss_action:4 */ + u8 flags; + __le16 pad0; + __le16 default_flow_id; + /* cmd word 1..5 */ + __le64 pad1[5]; + /* cmd word 6 */ + __le64 key_cfg_iova; +}; + +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at + * key_cfg_iova) + */ +struct dpni_mask_cfg { + u8 mask; + u8 offset; +}; + +#define DPNI_EFH_TYPE_SHIFT 0 +#define DPNI_EFH_TYPE_SIZE 4 +#define DPNI_EXTRACT_TYPE_SHIFT 0 +#define DPNI_EXTRACT_TYPE_SIZE 4 + +struct dpni_dist_extract { + /* word 0 */ + u8 prot; + /* 
EFH type stored in the 4 least significant bits */ + u8 efh_type; + u8 size; + u8 offset; + __le32 field; + /* word 1 */ + u8 hdr_index; + u8 constant; + u8 num_of_repeats; + u8 num_of_byte_masks; + /* Extraction type is stored in the 4 LSBs */ + u8 extract_type; + u8 pad[3]; + /* word 2 */ + struct dpni_mask_cfg masks[4]; +}; + +struct dpni_ext_set_rx_tc_dist { + /* extension word 0 */ + u8 num_extracts; + u8 pad[7]; + /* words 1..25 */ + struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; +}; + +struct dpni_cmd_get_queue { + u8 qtype; + u8 tc; + u8 index; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_STASH_CTRL_SHIFT 6 +#define DPNI_STASH_CTRL_SIZE 1 +#define DPNI_HOLD_ACTIVE_SHIFT 7 +#define DPNI_HOLD_ACTIVE_SIZE 1 + +struct dpni_rsp_get_queue { + /* response word 0 */ + __le64 pad0; + /* response word 1 */ + __le32 dest_id; + __le16 pad1; + u8 dest_prio; + /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */ + u8 flags; + /* response word 2 */ + __le64 flc; + /* response word 3 */ + __le64 user_context; + /* response word 4 */ + __le32 fqid; + __le16 qdbin; +}; + +struct dpni_cmd_set_queue { + /* cmd word 0 */ + u8 qtype; + u8 tc; + u8 index; + u8 options; + __le32 pad0; + /* cmd word 1 */ + __le32 dest_id; + __le16 pad1; + u8 dest_prio; + u8 flags; + /* cmd word 2 */ + __le64 flc; + /* cmd word 3 */ + __le64 user_context; +}; + +struct dpni_cmd_set_taildrop { + /* cmd word 0 */ + u8 congestion_point; + u8 qtype; + u8 tc; + u8 index; + __le32 pad0; + /* cmd word 1 */ + /* Only least significant bit is relevant */ + u8 enable; + u8 pad1; + u8 units; + u8 pad2; + __le32 threshold; +}; + +struct dpni_cmd_get_taildrop { + u8 congestion_point; + u8 qtype; + u8 tc; + u8 index; +}; + +struct dpni_rsp_get_taildrop { + /* cmd word 0 */ + __le64 pad0; + /* cmd word 1 */ + /* only least significant bit is relevant */ + u8 enable; + u8 pad1; + u8 units; + u8 pad2; + __le32 threshold; +}; + +struct dpni_rsp_get_api_version { + __le16 major; + __le16 minor; +}; + +#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_FS_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_fs_dist { + __le16 dist_size; + u8 enable; + u8 tc; + __le16 miss_flow_id; + __le16 pad; + __le64 key_cfg_iova; +}; + +#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_hash_dist { + __le16 dist_size; + u8 enable; + u8 tc; + __le32 pad; + __le64 key_cfg_iova; +}; + +struct dpni_cmd_add_fs_entry { + /* cmd word 0 */ + __le16 options; + u8 tc_id; + u8 key_size; + __le16 index; + __le16 flow_id; + /* cmd word 1 */ + __le64 key_iova; + /* cmd word 2 */ + __le64 mask_iova; + /* cmd word 3 */ + __le64 flc; +}; + +struct dpni_cmd_remove_fs_entry { + /* cmd word 0 */ + __le16 pad0; + u8 tc_id; + u8 key_size; + __le32 pad1; + /* cmd word 1 */ + __le64 key_iova; + /* cmd word 2 */ + __le64 mask_iova; +}; + +#endif /* _FSL_DPNI_CMD_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c new file mode 100644 index 000000000000..220dfc806a24 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -0,0 +1,1752 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016 NXP + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/fsl/mc.h> +#include "dpni.h" +#include "dpni-cmd.h" + +/** + * dpni_prepare_key_cfg() - function prepare extract parameters + * @cfg: defining a full Key Generation profile (rule) + * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA + * + * This function has to be called before the following functions: + * - dpni_set_rx_tc_dist() + * - dpni_set_qos_table() + */ +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf) +{ + int i, j; + struct dpni_ext_set_rx_tc_dist *dpni_ext; + struct dpni_dist_extract *extr; + + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS) + return -EINVAL; + + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf; + dpni_ext->num_extracts = cfg->num_extracts; + + for (i = 0; i < cfg->num_extracts; i++) { + extr = &dpni_ext->extracts[i]; + + switch (cfg->extracts[i].type) { + case DPKG_EXTRACT_FROM_HDR: + extr->prot = cfg->extracts[i].extract.from_hdr.prot; + dpni_set_field(extr->efh_type, EFH_TYPE, + cfg->extracts[i].extract.from_hdr.type); + extr->size = cfg->extracts[i].extract.from_hdr.size; + extr->offset = cfg->extracts[i].extract.from_hdr.offset; + extr->field = cpu_to_le32( + cfg->extracts[i].extract.from_hdr.field); + extr->hdr_index = + cfg->extracts[i].extract.from_hdr.hdr_index; + break; + case DPKG_EXTRACT_FROM_DATA: + extr->size = cfg->extracts[i].extract.from_data.size; + extr->offset = + cfg->extracts[i].extract.from_data.offset; + break; + case DPKG_EXTRACT_FROM_PARSE: + extr->size = cfg->extracts[i].extract.from_parse.size; + extr->offset = + cfg->extracts[i].extract.from_parse.offset; + break; + default: + return -EINVAL; + } + + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks; + dpni_set_field(extr->extract_type, EXTRACT_TYPE, + cfg->extracts[i].type); + + for (j = 0; j < DPKG_NUM_OF_MASKS; j++) { + extr->masks[j].mask = cfg->extracts[i].masks[j].mask; + extr->masks[j].offset = + cfg->extracts[i].masks[j].offset; + } + } + + return 0; +} + +/** + * dpni_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpni_id: DPNI unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpni_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. 
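+ *
+ * A minimal usage sketch (the zero cmd_flags value, the 'dpni_id' variable
+ * and the error handling below are illustrative assumptions, not part of
+ * this API description):
+ *
+ *	u16 token;
+ *	int err;
+ *
+ *	err = dpni_open(mc_io, 0, dpni_id, &token);
+ *	if (err)
+ *		return err;
+ *	... issue further dpni_xxx() commands using 'token' ...
+ *	err = dpni_close(mc_io, 0, token);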
+ */ +int dpni_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpni_id, + u16 *token) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_open *cmd_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpni_cmd_open *)cmd.params; + cmd_params->dpni_id = cpu_to_le32(dpni_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpni_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_pools() - Set buffer pools configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Buffer pools configuration + * + * mandatory for DPNI operation + * warning:Allowed only when DPNI is disabled + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_pools(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_pools_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_pools *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_pools *)cmd.params; + cmd_params->num_dpbp = cfg->num_dpbp; + for (i = 0; i < DPNI_MAX_DPBP; i++) { + cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); + cmd_params->buffer_size[i] = + cpu_to_le16(cfg->pools[i].buffer_size); + cmd_params->backup_pool_mask |= + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i); + } + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_enable() - Enable the DPNI, allow sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_disable() - Disable the DPNI, stop sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_is_enabled() - Check if the DPNI is enabled. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_is_enabled(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_is_enabled *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_reset() - Reset the DPNI, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_irq_enable() - Set overall interrupt state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Interrupt state: - enable = 1, disable = 0 + * + * Allows GPP software to control when interrupts are generated. + * Each interrupt can have up to 32 causes. The enable/disable control's the + * overall interrupt state. if the interrupt is disabled no causes will cause + * an interrupt. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_enable *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_enable() - Get overall interrupt state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Returned interrupt state - enable = 1, disable = 0 + * + * Return: '0' on Success; Error code otherwise. 
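+ *
+ * Illustrative sketch only (irq_index 0, zero cmd_flags and the error
+ * handling are assumed for the example); read back the state programmed
+ * by dpni_set_irq_enable():
+ *
+ *	u8 en;
+ *	int err;
+ *
+ *	err = dpni_set_irq_enable(mc_io, 0, token, 0, 1);
+ *	if (!err)
+ *		err = dpni_get_irq_enable(mc_io, 0, token, 0, &en);
+ *	... on success 'en' now reads back as 1 ...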
+ */ +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_enable *cmd_params; + struct dpni_rsp_get_irq_enable *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_mask *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_mask() - Get interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: Returned event mask to trigger interrupt + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_mask *cmd_params; + struct dpni_rsp_get_irq_mask *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params; + *mask = le32_to_cpu(rsp_params->mask); + + return 0; +} + +/** + * dpni_get_irq_status() - Get the current status of any pending interrupts. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. 
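+ *
+ * A short sketch of the usual read-then-acknowledge sequence (irq_index 0,
+ * zero cmd_flags and the error handling are assumptions for illustration).
+ * Note that the value passed in via @status is copied into the command, so
+ * the caller should initialize it before the call:
+ *
+ *	u32 status = 0;
+ *	int err;
+ *
+ *	err = dpni_get_irq_status(mc_io, 0, token, 0, &status);
+ *	if (!err && status)
+ *		err = dpni_clear_irq_status(mc_io, 0, token, 0, status);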
+ */ +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_status *cmd_params; + struct dpni_rsp_get_irq_status *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(*status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params; + *status = le32_to_cpu(rsp_params->status); + + return 0; +} + +/** + * dpni_clear_irq_status() - Clear a pending interrupt's status + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: bits to clear (W1C) - one bit per cause: + * 0 = don't change + * 1 = clear status bit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_clear_irq_status *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params; + cmd_params->irq_index = irq_index; + cmd_params->status = cpu_to_le32(status); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_attributes() - Retrieve DPNI attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @attr: Object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_attr *attr) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_attr *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_attr *)cmd.params; + attr->options = le32_to_cpu(rsp_params->options); + attr->num_queues = rsp_params->num_queues; + attr->num_tcs = rsp_params->num_tcs; + attr->mac_filter_entries = rsp_params->mac_filter_entries; + attr->vlan_filter_entries = rsp_params->vlan_filter_entries; + attr->qos_entries = rsp_params->qos_entries; + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries); + attr->qos_key_size = rsp_params->qos_key_size; + attr->fs_key_size = rsp_params->fs_key_size; + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version); + + return 0; +} + +/** + * dpni_set_errors_behavior() - Set errors behavior + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Errors configuration + * + * this function may be called numerous times with different + * error masks + * + * Return: '0' on Success; Error code otherwise. 
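+ *
+ * Illustrative sketch; DPNI_ERROR_EOFHE and DPNI_ERROR_ACTION_DISCARD are
+ * assumed to be the error mask and action constants declared in dpni.h and
+ * are shown here only as an example configuration:
+ *
+ *	struct dpni_error_cfg err_cfg = {
+ *		.errors			= DPNI_ERROR_EOFHE,
+ *		.error_action		= DPNI_ERROR_ACTION_DISCARD,
+ *		.set_frame_annotation	= 1,
+ *	};
+ *
+ *	err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);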
+ */ +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_error_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_errors_behavior *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params; + cmd_params->errors = cpu_to_le32(cfg->errors); + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action); + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_buffer_layout() - Retrieve buffer layout attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to retrieve configuration for + * @layout: Returns buffer layout attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_buffer_layout *cmd_params; + struct dpni_rsp_get_buffer_layout *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params; + layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS); + layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR); + layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS); + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size); + layout->data_align = le16_to_cpu(rsp_params->data_align); + layout->data_head_room = le16_to_cpu(rsp_params->head_room); + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room); + + return 0; +} + +/** + * dpni_set_buffer_layout() - Set buffer layout configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue this configuration applies to + * @layout: Buffer layout configuration + * + * Return: '0' on Success; Error code otherwise. 
+ * + * @warning Allowed only when DPNI is disabled + */ +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_buffer_layout *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->options = cpu_to_le16(layout->options); + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp); + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result); + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status); + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size); + cmd_params->data_align = cpu_to_le16(layout->data_align); + cmd_params->head_room = cpu_to_le16(layout->data_head_room); + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_offload() - Set DPNI offload configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: Type of DPNI offload + * @config: Offload configuration. + * For checksum offloads, non-zero value enables the offload + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Allowed only when DPNI is disabled + */ + +int dpni_set_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 config) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_offload *cmd_params; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_offload *)cmd.params; + cmd_params->dpni_offload = type; + cmd_params->config = cpu_to_le32(config); + + return mc_send_command(mc_io, &cmd); +} + +int dpni_get_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 *config) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_offload *cmd_params; + struct dpni_rsp_get_offload *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_offload *)cmd.params; + cmd_params->dpni_offload = type; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_offload *)cmd.params; + *config = le32_to_cpu(rsp_params->config); + + return 0; +} + +/** + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used + * for enqueue operations + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to receive QDID for + * @qdid: Returned virtual QDID value that should be used as an argument + * in all enqueue operations + * + * Return: '0' on Success; Error code otherwise. 
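+ *
+ * Usage sketch (DPNI_QUEUE_TX is assumed to be one of the dpni_queue_type
+ * values from dpni.h; error handling is trimmed for brevity):
+ *
+ *	u16 qdid;
+ *	int err;
+ *
+ *	err = dpni_get_qdid(mc_io, 0, token, DPNI_QUEUE_TX, &qdid);
+ *	if (err)
+ *		return err;
+ *	... use 'qdid' when building Tx enqueue descriptors ...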
+ */ +int dpni_get_qdid(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u16 *qdid) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_qdid *cmd_params; + struct dpni_rsp_get_qdid *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params; + *qdid = le16_to_cpu(rsp_params->qdid); + + return 0; +} + +/** + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @data_offset: Tx data offset (from start of buffer) + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *data_offset) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_tx_data_offset *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params; + *data_offset = le16_to_cpu(rsp_params->data_offset); + + return 0; +} + +/** + * dpni_set_link_cfg() - set the link configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Link configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_link_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_link_cfg *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params; + cmd_params->rate = cpu_to_le32(cfg->rate); + cmd_params->options = cpu_to_le64(cfg->options); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_link_state() - Return the link state (either up or down) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @state: Returned link state; + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_link_state(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_link_state *state) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_link_state *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params; + state->up = dpni_get_field(rsp_params->flags, LINK_STATE); + state->rate = le32_to_cpu(rsp_params->rate); + state->options = le64_to_cpu(rsp_params->options); + + return 0; +} + +/** + * dpni_set_max_frame_length() - Set the maximum received frame length. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in + * bytes); frame is discarded if its + * length exceeds this value + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 max_frame_length) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_max_frame_length *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params; + cmd_params->max_frame_length = cpu_to_le16(max_frame_length); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_max_frame_length() - Get the maximum received frame length. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in + * bytes); frame is discarded if its + * length exceeds this value + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *max_frame_length) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_max_frame_length *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params; + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length); + + return 0; +} + +/** + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_multicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_multicast_promisc() - Get multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. 
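+ *
+ * Illustrative set/get pairing ('want_mc_promisc' is a made-up local flag
+ * used only for this example):
+ *
+ *	int en;
+ *	int err;
+ *
+ *	err = dpni_set_multicast_promisc(mc_io, 0, token, want_mc_promisc);
+ *	if (!err)
+ *		err = dpni_get_multicast_promisc(mc_io, 0, token, &en);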
+ */ +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_multicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_unicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_unicast_promisc() - Get unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_unicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_primary_mac_addr() - Set the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to set as primary address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_primary_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_primary_mac_addr() - Get the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: Returned MAC address + * + * Return: '0' on Success; Error code otherwise. 
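+ *
+ * The byte order of @mac_addr is adjusted internally, so no swapping is
+ * needed by the caller. A minimal sketch ('net_dev' is an assumed
+ * struct net_device pointer; error handling is trimmed):
+ *
+ *	u8 mac[6];
+ *	int err;
+ *
+ *	err = dpni_get_primary_mac_addr(mc_io, 0, token, mac);
+ *	if (!err)
+ *		ether_addr_copy(net_dev->dev_addr, mac);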
+ */ +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_primary_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical + * port the DPNI is attached to + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address of the physical port, if any, otherwise 0 + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_port_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_add_mac_addr() - Add MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to add + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_add_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_mac_addr() - Remove MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to remove + * + * Return: '0' on Success; Error code otherwise. 
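+ *
+ * Minimal sketch ('old_addr' is an assumed six-byte array holding an
+ * address previously installed with dpni_add_mac_addr(), and 'net_dev' is
+ * an assumed struct net_device pointer):
+ *
+ *	err = dpni_remove_mac_addr(mc_io, 0, token, old_addr);
+ *	if (err)
+ *		netdev_warn(net_dev, "dpni_remove_mac_addr() failed\n");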
+ */ +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_remove_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @unicast: Set to '1' to clear unicast addresses + * @multicast: Set to '1' to clear multicast addresses + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int unicast, + int multicast) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_clear_mac_filters *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params; + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast); + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @cfg: Traffic class distribution configuration + * + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg() + * first to prepare the key_cfg_iova parameter + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rx_tc_dist_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_rx_tc_dist *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + cmd_params->tc_id = tc_id; + dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode); + dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action); + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id); + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_queue() - Set queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported, although + * the command is ignored for Tx + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. 
Value must be in range 0 to NUM_QUEUES - 1 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what + * configuration options are set on the queue + * @queue: Queue structure + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + u8 options, + const struct dpni_queue *queue) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_queue *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + cmd_params->options = options; + cmd_params->dest_id = cpu_to_le32(queue->destination.id); + cmd_params->dest_prio = queue->destination.priority; + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type); + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control); + dpni_set_field(cmd_params->flags, HOLD_ACTIVE, + queue->destination.hold_active); + cmd_params->flc = cpu_to_le64(queue->flc.value); + cmd_params->user_context = cpu_to_le64(queue->user_context); + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_queue() - Get queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. Value must be in range 0 to NUM_QUEUES - 1 + * @queue: Queue configuration structure + * @qid: Queue identification + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_queue *queue, + struct dpni_queue_id *qid) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_queue *cmd_params; + struct dpni_rsp_get_queue *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_queue *)cmd.params; + queue->destination.id = le32_to_cpu(rsp_params->dest_id); + queue->destination.priority = rsp_params->dest_prio; + queue->destination.type = dpni_get_field(rsp_params->flags, + DEST_TYPE); + queue->flc.stash_control = dpni_get_field(rsp_params->flags, + STASH_CTRL); + queue->destination.hold_active = dpni_get_field(rsp_params->flags, + HOLD_ACTIVE); + queue->flc.value = le64_to_cpu(rsp_params->flc); + queue->user_context = le64_to_cpu(rsp_params->user_context); + qid->fqid = le32_to_cpu(rsp_params->fqid); + qid->qdbin = le16_to_cpu(rsp_params->qdbin); + + return 0; +} + +/** + * dpni_get_statistics() - Get DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @page: Selects the statistics page to retrieve, see + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2. 
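dpni_set_queue() only writes the fields selected by the options bitmask, so a typical caller fills in just the destination and user context when binding an Rx queue to a notification channel. The sketch below assumes the same hypothetical example_priv context and a DPIO object id obtained elsewhere.

static int example_setup_rx_queue(struct example_priv *priv, int dpio_id)
{
        struct dpni_queue queue = { 0 };
        u8 options = DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX;

        queue.destination.type = DPNI_DEST_DPIO;
        queue.destination.id = dpio_id;
        queue.destination.priority = 1;
        queue.user_context = (u64)(uintptr_t)priv;

        /* TC 0, queue index 0 of the Rx queue set */
        return dpni_set_queue(priv->mc_io, 0, priv->mc_token,
                              DPNI_QUEUE_RX, 0, 0, options, &queue);
}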
+ * @stat: Structure containing the statistics + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_statistics(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 page, + union dpni_statistics *stat) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_statistics *cmd_params; + struct dpni_rsp_get_statistics *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params; + cmd_params->page_number = page; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params; + for (i = 0; i < DPNI_STATISTICS_CNT; i++) + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]); + + return 0; +} + +/** + * dpni_set_taildrop() - Set taildrop per queue or TC + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point + * @q_type: Queue type on which the taildrop is configured. + * Only Rx queues are supported for now + * @tc: Traffic class to apply this taildrop to + * @q_index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. Ignored if CONGESTION_POINT is not 0. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_taildrop *taildrop) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_taildrop *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable); + cmd_params->units = taildrop->units; + cmd_params->threshold = cpu_to_le32(taildrop->threshold); + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_taildrop() - Get taildrop information + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point + * @q_type: Queue type on which the taildrop is configured. + * Only Rx queues are supported for now + * @tc: Traffic class to apply this taildrop to + * @q_index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. Ignored if CONGESTION_POINT is not 0. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise. 
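Each statistics page returns up to DPNI_STATISTICS_CNT 64-bit counters; which counter sits in which slot is described by the page_0/page_1/page_2 views of union dpni_statistics further down in dpni.h. A rough example of folding page 0 (ingress counters) into rtnl_link_stats64, again using the assumed example_priv context:

static void example_update_rx_stats(struct example_priv *priv,
                                    struct net_device *net_dev,
                                    struct rtnl_link_stats64 *stats)
{
        union dpni_statistics dpni_stats;
        int err;

        err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
                                  0 /* page 0 */, &dpni_stats);
        if (err) {
                netdev_warn(net_dev, "dpni_get_statistics() failed %d\n", err);
                return;
        }

        stats->rx_packets = dpni_stats.page_0.ingress_all_frames;
        stats->rx_bytes = dpni_stats.page_0.ingress_all_bytes;
        stats->multicast = dpni_stats.page_0.ingress_multicast_frames;
}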
+ */ +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_taildrop *taildrop) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_taildrop *cmd_params; + struct dpni_rsp_get_taildrop *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params; + taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE); + taildrop->units = rsp_params->units; + taildrop->threshold = le32_to_cpu(rsp_params->threshold); + + return 0; +} + +/** + * dpni_get_api_version() - Get Data Path Network Interface API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path network interface API + * @minor_ver: Minor version of data path network interface API + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver) +{ + struct dpni_rsp_get_api_version *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION, + cmd_flags, 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} + +/** + * dpni_set_rx_fs_dist() - Set Rx flow steering distribution + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Distribution configuration + * + * If the FS is already enabled with a previous call the classification + * key will be changed but all the table rules are kept. If the + * existing rules do not match the key the results will not be + * predictable. It is the user responsibility to keep key integrity. + * If cfg.enable is set to 1 the command will create a flow steering table + * and will classify packets according to this table. The packets that + * miss all the table rules will be classified according to settings + * made in dpni_set_rx_hash_dist() + * If cfg.enable is set to 0 the command will clear flow steering table. 
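dpni_get_api_version() is normally issued early after opening the object so the driver can refuse to run against firmware that is too old. A sketch of such a check, with made-up minimum version numbers purely for illustration:

#define EXAMPLE_DPNI_VER_MAJOR  7
#define EXAMPLE_DPNI_VER_MINOR  0

static int example_check_dpni_version(struct example_priv *priv,
                                      struct net_device *net_dev)
{
        u16 major, minor;
        int err;

        err = dpni_get_api_version(priv->mc_io, 0, &major, &minor);
        if (err)
                return err;

        if (major < EXAMPLE_DPNI_VER_MAJOR ||
            (major == EXAMPLE_DPNI_VER_MAJOR && minor < EXAMPLE_DPNI_VER_MINOR)) {
                netdev_err(net_dev, "DPNI API %u.%u too old, need >= %u.%u\n",
                           major, minor, EXAMPLE_DPNI_VER_MAJOR,
                           EXAMPLE_DPNI_VER_MINOR);
                return -EOPNOTSUPP;
        }

        return 0;
}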
+ * The packets will be classified according to settings made in + * dpni_set_rx_hash_dist() + */ +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_rx_dist_cfg *cfg) +{ + struct dpni_cmd_set_rx_fs_dist *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable); + cmd_params->tc = cfg->tc; + cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id); + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_rx_hash_dist() - Set Rx hash distribution + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Distribution configuration + * If cfg.enable is set to 1 the packets will be classified using a hash + * function based on the key received in cfg.key_cfg_iova parameter. + * If cfg.enable is set to 0 the packets will be sent to the default queue + */ +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_rx_dist_cfg *cfg) +{ + struct dpni_cmd_set_rx_hash_dist *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable); + cmd_params->tc = cfg->tc; + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class + * (to select a flow ID) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @index: Location in the FS table where to insert the entry. + * Only relevant if MASKING is enabled for FS + * classification on this DPNI, it is ignored for exact match. + * @cfg: Flow steering rule to add + * @action: Action to be taken as result of a classification hit + * + * Return: '0' on Success; Error code otherwise. 
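Both distribution commands take key_cfg_iova, which per the descriptions above must point to 256 bytes of DMA-able memory filled by dpni_prepare_key_cfg(). A sketch of enabling hash distribution on TC 0 across eight queues; building the dpkg_profile_cfg extract list (see dpkg.h) is left out, and the buffer handling below is an assumption based on those descriptions, not driver code from this patch.

static int example_enable_hash(struct example_priv *priv, struct device *dev,
                               struct dpkg_profile_cfg *key_cfg)
{
        struct dpni_rx_dist_cfg dist_cfg = { 0 };
        dma_addr_t key_iova;
        u8 *key_buf;
        int err;

        key_buf = kzalloc(256, GFP_KERNEL);
        if (!key_buf)
                return -ENOMEM;

        err = dpni_prepare_key_cfg(key_cfg, key_buf);
        if (err)
                goto free_buf;

        key_iova = dma_map_single(dev, key_buf, 256, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, key_iova)) {
                err = -ENOMEM;
                goto free_buf;
        }

        dist_cfg.enable = 1;
        dist_cfg.tc = 0;
        dist_cfg.dist_size = 8;         /* must be one of the supported sizes */
        dist_cfg.key_cfg_iova = key_iova;

        err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);

        dma_unmap_single(dev, key_iova, 256, DMA_TO_DEVICE);
free_buf:
        kfree(key_buf);
        return err;
}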
+ */ +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + u16 index, + const struct dpni_rule_cfg *cfg, + const struct dpni_fs_action_cfg *action) +{ + struct dpni_cmd_add_fs_entry *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->index = cpu_to_le16(index); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + cmd_params->options = cpu_to_le16(action->options); + cmd_params->flow_id = cpu_to_le16(action->flow_id); + cmd_params->flc = cpu_to_le64(action->flc); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific + * traffic class + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @cfg: Flow steering rule to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rule_cfg *cfg) +{ + struct dpni_cmd_remove_fs_entry *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h new file mode 100644 index 000000000000..a521242e2353 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -0,0 +1,921 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016 NXP + */ +#ifndef __FSL_DPNI_H +#define __FSL_DPNI_H + +#include "dpkg.h" + +struct fsl_mc_io; + +/** + * Data Path Network Interface API + * Contains initialization APIs and runtime control APIs for DPNI + */ + +/** General DPNI macros */ + +/** + * Maximum number of traffic classes + */ +#define DPNI_MAX_TC 8 +/** + * Maximum number of buffer pools per DPNI + */ +#define DPNI_MAX_DPBP 8 + +/** + * All traffic classes considered; see dpni_set_queue() + */ +#define DPNI_ALL_TCS (u8)(-1) +/** + * All flows within traffic class considered; see dpni_set_queue() + */ +#define DPNI_ALL_TC_FLOWS (u16)(-1) +/** + * Generate new flow ID; see dpni_set_queue() + */ +#define DPNI_NEW_FLOW_ID (u16)(-1) + +/** + * Tx traffic is always released to a buffer pool on transmit, there are no + * resources allocated to have the frames confirmed back to the source after + * transmission. + */ +#define DPNI_OPT_TX_FRM_RELEASE 0x000001 +/** + * Disables support for MAC address filtering for addresses other than primary + * MAC address. This affects both unicast and multicast. Promiscuous mode can + * still be enabled/disabled for both unicast and multicast. If promiscuous mode + * is disabled, only traffic matching the primary MAC address will be accepted. 
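The flow-steering rule itself lives in DMA-able memory (key and optional mask) and the action selects the target Rx queue. Assuming the key/mask buffers are already mapped, adding a rule reduces to filling the two structures defined in dpni.h; the helper below is illustrative only.

static int example_add_steering_rule(struct example_priv *priv,
                                     dma_addr_t key_iova, dma_addr_t mask_iova,
                                     u8 key_size, u16 flow_id, u16 index)
{
        struct dpni_rule_cfg rule = {
                .key_iova = key_iova,
                .mask_iova = mask_iova,
                .key_size = key_size,
        };
        struct dpni_fs_action_cfg action = {
                .flow_id = flow_id,
                .options = 0,   /* no discard, no FLC override */
        };

        /* TC 0; 'index' is only meaningful when masking is enabled */
        return dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, index,
                                 &rule, &action);
}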
+ */ +#define DPNI_OPT_NO_MAC_FILTER 0x000002 +/** + * Allocate policers for this DPNI. They can be used to rate-limit traffic per + * traffic class (TC) basis. + */ +#define DPNI_OPT_HAS_POLICING 0x000004 +/** + * Congestion can be managed in several ways, allowing the buffer pool to + * deplete on ingress, taildrop on each queue or use congestion groups for sets + * of queues. If set, it configures a single congestion groups across all TCs. + * If reset, a congestion group is allocated for each TC. Only relevant if the + * DPNI has multiple traffic classes. + */ +#define DPNI_OPT_SHARED_CONGESTION 0x000008 +/** + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all + * look-ups are exact match. Note that TCAM is not available on LS1088 and its + * variants. Setting this bit on these SoCs will trigger an error. + */ +#define DPNI_OPT_HAS_KEY_MASKING 0x000010 +/** + * Disables the flow steering table. + */ +#define DPNI_OPT_NO_FS 0x000020 + +int dpni_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpni_id, + u16 *token); + +int dpni_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * struct dpni_pools_cfg - Structure representing buffer pools configuration + * @num_dpbp: Number of DPBPs + * @pools: Array of buffer pools parameters; The number of valid entries + * must match 'num_dpbp' value + * @pools.dpbp_id: DPBP object ID + * @pools.buffer_size: Buffer size + * @pools.backup_pool: Backup pool + */ +struct dpni_pools_cfg { + u8 num_dpbp; + struct { + int dpbp_id; + u16 buffer_size; + int backup_pool; + } pools[DPNI_MAX_DPBP]; +}; + +int dpni_set_pools(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_pools_cfg *cfg); + +int dpni_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpni_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpni_is_enabled(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en); + +int dpni_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * DPNI IRQ Index and Events + */ + +/** + * IRQ index + */ +#define DPNI_IRQ_INDEX 0 +/** + * IRQ event - indicates a change in link state + */ +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 + +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en); + +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en); + +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask); + +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask); + +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status); + +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status); + +/** + * struct dpni_attr - Structure representing DPNI attributes + * @options: Any combination of the following options: + * DPNI_OPT_TX_FRM_RELEASE + * DPNI_OPT_NO_MAC_FILTER + * DPNI_OPT_HAS_POLICING + * DPNI_OPT_SHARED_CONGESTION + * DPNI_OPT_HAS_KEY_MASKING + * DPNI_OPT_NO_FS + * @num_queues: Number of Tx and Rx queues used for traffic distribution. + * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI. + * @mac_filter_entries: Number of entries in the MAC address filtering table. + * @vlan_filter_entries: Number of entries in the VLAN address filtering table. 
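Putting the control-plane pieces together, a probe path typically opens the object, queries its attributes to size queues and tables, and closes the token again only on failure or at remove time. A condensed sketch (the dpni_id would normally come from the fsl_mc_device that matched the driver):

static int example_dpni_open_and_query(struct fsl_mc_io *mc_io, int dpni_id,
                                       u16 *token, struct dpni_attr *attr)
{
        int err;

        err = dpni_open(mc_io, 0, dpni_id, token);
        if (err)
                return err;

        err = dpni_get_attributes(mc_io, 0, *token, attr);
        if (err) {
                dpni_close(mc_io, 0, *token);
                return err;
        }

        pr_info("DPNI: %u queues, %u TCs, options 0x%x\n",
                attr->num_queues, attr->num_tcs, attr->options);

        return 0;
}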
+ * @qos_entries: Number of entries in the QoS classification table. + * @fs_entries: Number of entries in the flow steering table. + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger + * than this when adding QoS entries will result in an error. + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a + * key larger than this when composing the hash + FS key will + * result in an error. + * @wriop_version: Version of WRIOP HW block. The 3 version values are stored + * on 6, 5, 5 bits respectively. + */ +struct dpni_attr { + u32 options; + u8 num_queues; + u8 num_tcs; + u8 mac_filter_entries; + u8 vlan_filter_entries; + u8 qos_entries; + u16 fs_entries; + u8 qos_key_size; + u8 fs_key_size; + u16 wriop_version; +}; + +int dpni_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_attr *attr); + +/** + * DPNI errors + */ + +/** + * Extract out of frame header error + */ +#define DPNI_ERROR_EOFHE 0x00020000 +/** + * Frame length error + */ +#define DPNI_ERROR_FLE 0x00002000 +/** + * Frame physical error + */ +#define DPNI_ERROR_FPE 0x00001000 +/** + * Parsing header error + */ +#define DPNI_ERROR_PHE 0x00000020 +/** + * Parser L3 checksum error + */ +#define DPNI_ERROR_L3CE 0x00000004 +/** + * Parser L3 checksum error + */ +#define DPNI_ERROR_L4CE 0x00000001 + +/** + * enum dpni_error_action - Defines DPNI behavior for errors + * @DPNI_ERROR_ACTION_DISCARD: Discard the frame + * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow + * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue + */ +enum dpni_error_action { + DPNI_ERROR_ACTION_DISCARD = 0, + DPNI_ERROR_ACTION_CONTINUE = 1, + DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 +}; + +/** + * struct dpni_error_cfg - Structure representing DPNI errors treatment + * @errors: Errors mask; use 'DPNI_ERROR__<X> + * @error_action: The desired action for the errors mask + * @set_frame_annotation: Set to '1' to mark the errors in frame annotation + * status (FAS); relevant only for the non-discard action + */ +struct dpni_error_cfg { + u32 errors; + enum dpni_error_action error_action; + int set_frame_annotation; +}; + +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_error_cfg *cfg); + +/** + * DPNI buffer layout modification options + */ + +/** + * Select to modify the time-stamp setting + */ +#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 +/** + * Select to modify the parser-result setting; not applicable for Tx + */ +#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 +/** + * Select to modify the frame-status setting + */ +#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 +/** + * Select to modify the private-data-size setting + */ +#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 +/** + * Select to modify the data-alignment setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 +/** + * Select to modify the data-head-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 +/** + * Select to modify the data-tail-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 + +/** + * struct dpni_buffer_layout - Structure representing DPNI buffer layout + * @options: Flags representing the suggested modifications to the buffer + * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags + * @pass_timestamp: Pass timestamp value + * @pass_parser_result: Pass parser results + * @pass_frame_status: Pass frame status + * @private_data_size: 
Size kept for private data (in bytes) + * @data_align: Data alignment + * @data_head_room: Data head room + * @data_tail_room: Data tail room + */ +struct dpni_buffer_layout { + u32 options; + int pass_timestamp; + int pass_parser_result; + int pass_frame_status; + u16 private_data_size; + u16 data_align; + u16 data_head_room; + u16 data_tail_room; +}; + +/** + * enum dpni_queue_type - Identifies a type of queue targeted by the command + * @DPNI_QUEUE_RX: Rx queue + * @DPNI_QUEUE_TX: Tx queue + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue + * @DPNI_QUEUE_RX_ERR: Rx error queue + */enum dpni_queue_type { + DPNI_QUEUE_RX, + DPNI_QUEUE_TX, + DPNI_QUEUE_TX_CONFIRM, + DPNI_QUEUE_RX_ERR, +}; + +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout); + +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout); + +/** + * enum dpni_offload - Identifies a type of offload targeted by the command + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation + */ +enum dpni_offload { + DPNI_OFF_RX_L3_CSUM, + DPNI_OFF_RX_L4_CSUM, + DPNI_OFF_TX_L3_CSUM, + DPNI_OFF_TX_L4_CSUM, +}; + +int dpni_set_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 config); + +int dpni_get_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 *config); + +int dpni_get_qdid(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u16 *qdid); + +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *data_offset); + +#define DPNI_STATISTICS_CNT 7 + +/** + * union dpni_statistics - Union describing the DPNI statistics + * @page_0: Page_0 statistics structure + * @page_0.ingress_all_frames: Ingress frame count + * @page_0.ingress_all_bytes: Ingress byte count + * @page_0.ingress_multicast_frames: Ingress multicast frame count + * @page_0.ingress_multicast_bytes: Ingress multicast byte count + * @page_0.ingress_broadcast_frames: Ingress broadcast frame count + * @page_0.ingress_broadcast_bytes: Ingress broadcast byte count + * @page_1: Page_1 statistics structure + * @page_1.egress_all_frames: Egress frame count + * @page_1.egress_all_bytes: Egress byte count + * @page_1.egress_multicast_frames: Egress multicast frame count + * @page_1.egress_multicast_bytes: Egress multicast byte count + * @page_1.egress_broadcast_frames: Egress broadcast frame count + * @page_1.egress_broadcast_bytes: Egress broadcast byte count + * @page_2: Page_2 statistics structure + * @page_2.ingress_filtered_frames: Ingress filtered frame count + * @page_2.ingress_discarded_frames: Ingress discarded frame count + * @page_2.ingress_nobuffer_discards: Ingress discarded frame count due to + * lack of buffers + * @page_2.egress_discarded_frames: Egress discarded frame count + * @page_2.egress_confirmed_frames: Egress confirmed frame count + * @raw: raw statistics structure, used to index counters + */ +union dpni_statistics { + struct { + u64 ingress_all_frames; + u64 ingress_all_bytes; + u64 ingress_multicast_frames; + u64 ingress_multicast_bytes; + u64 ingress_broadcast_frames; + u64 ingress_broadcast_bytes; + } page_0; + struct { + u64 egress_all_frames; + u64 
egress_all_bytes; + u64 egress_multicast_frames; + u64 egress_multicast_bytes; + u64 egress_broadcast_frames; + u64 egress_broadcast_bytes; + } page_1; + struct { + u64 ingress_filtered_frames; + u64 ingress_discarded_frames; + u64 ingress_nobuffer_discards; + u64 egress_discarded_frames; + u64 egress_confirmed_frames; + } page_2; + struct { + u64 counter[DPNI_STATISTICS_CNT]; + } raw; +}; + +int dpni_get_statistics(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 page, + union dpni_statistics *stat); + +/** + * Enable auto-negotiation + */ +#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL +/** + * Enable half-duplex mode + */ +#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +/** + * Enable pause frames + */ +#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL +/** + * Enable a-symmetric pause frames + */ +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL + +/** + * struct - Structure representing DPNI link configuration + * @rate: Rate + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values + */ +struct dpni_link_cfg { + u32 rate; + u64 options; +}; + +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_link_cfg *cfg); + +/** + * struct dpni_link_state - Structure representing DPNI link state + * @rate: Rate + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values + * @up: Link state; '0' for down, '1' for up + */ +struct dpni_link_state { + u32 rate; + u64 options; + int up; +}; + +int dpni_get_link_state(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_link_state *state); + +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 max_frame_length); + +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *max_frame_length); + +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en); + +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en); + +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en); + +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en); + +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]); + +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 mac_addr[6]); + +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + u32 cm_flags, + u16 token, + u8 mac_addr[6]); + +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]); + +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]); + +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int unicast, + int multicast); + +/** + * enum dpni_dist_mode - DPNI distribution mode + * @DPNI_DIST_MODE_NONE: No distribution + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation + */ +enum dpni_dist_mode { + DPNI_DIST_MODE_NONE = 0, + DPNI_DIST_MODE_HASH = 1, + DPNI_DIST_MODE_FS = 2 +}; + +/** + * enum dpni_fs_miss_action - DPNI Flow Steering miss action + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use 
explicit flow-id + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash + */ +enum dpni_fs_miss_action { + DPNI_FS_MISS_DROP = 0, + DPNI_FS_MISS_EXPLICIT_FLOWID = 1, + DPNI_FS_MISS_HASH = 2 +}; + +/** + * struct dpni_fs_tbl_cfg - Flow Steering table configuration + * @miss_action: Miss action selection + * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' + */ +struct dpni_fs_tbl_cfg { + enum dpni_fs_miss_action miss_action; + u16 default_flow_id; +}; + +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, + u8 *key_cfg_buf); + +/** + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration + * @dist_size: Set the distribution size; + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, + * 112,128,192,224,256,384,448,512,768,896,1024 + * @dist_mode: Distribution mode + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpni_prepare_key_cfg() relevant only when + * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' + * @fs_cfg: Flow Steering table configuration; only relevant if + * 'dist_mode = DPNI_DIST_MODE_FS' + */ +struct dpni_rx_tc_dist_cfg { + u16 dist_size; + enum dpni_dist_mode dist_mode; + u64 key_cfg_iova; + struct dpni_fs_tbl_cfg fs_cfg; +}; + +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rx_tc_dist_cfg *cfg); + +/** + * When used for fs_miss_flow_id in function dpni_set_rx_dist, + * will signal to dpni to drop all unclassified frames + */ +#define DPNI_FS_MISS_DROP ((uint16_t)-1) + +/** + * struct dpni_rx_dist_cfg - Rx distribution configuration + * @dist_size: distribution size + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpni_prepare_key_cfg(); relevant only when enable!=0 otherwise + * it can be '0' + * @enable: enable/disable the distribution. + * @tc: TC id for which distribution is set + * @fs_miss_flow_id: when packet misses all rules from flow steering table and + * hash is disabled it will be put into this queue id; use + * DPNI_FS_MISS_DROP to drop frames. 
The value of this field is + * used only when flow steering distribution is enabled and hash + * distribution is disabled + */ +struct dpni_rx_dist_cfg { + u16 dist_size; + u64 key_cfg_iova; + u8 enable; + u8 tc; + u16 fs_miss_flow_id; +}; + +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_rx_dist_cfg *cfg); + +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_rx_dist_cfg *cfg); + +/** + * enum dpni_dest - DPNI destination types + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and + * does not generate FQDAN notifications; user is expected to + * dequeue from the queue based on polling or other user-defined + * method + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN + * notifications to the specified DPIO; user is expected to dequeue + * from the queue only after notification is received + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate + * FQDAN notifications, but is connected to the specified DPCON + * object; user is expected to dequeue from the DPCON channel + */ +enum dpni_dest { + DPNI_DEST_NONE = 0, + DPNI_DEST_DPIO = 1, + DPNI_DEST_DPCON = 2 +}; + +/** + * struct dpni_queue - Queue structure + * @destination - Destination structure + * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0. + * Identifies either a DPIO or a DPCON object. + * Not relevant for Tx queues. + * @destination.type: May be one of the following: + * 0 - No destination, queue can be manually + * queried, but will not push traffic or + * notifications to a DPIO; + * 1 - The destination is a DPIO. When traffic + * becomes available in the queue a FQDAN + * (FQ data available notification) will be + * generated to selected DPIO; + * 2 - The destination is a DPCON. The queue is + * associated with a DPCON object for the + * purpose of scheduling between multiple + * queues. The DPCON may be independently + * configured to generate notifications. + * Not relevant for Tx queues. + * @destination.hold_active: Hold active, maintains a queue scheduled for longer + * in a DPIO during dequeue to reduce spread of traffic. + * Only relevant if queues are + * not affined to a single DPIO. + * @user_context: User data, presented to the user along with any frames + * from this queue. Not relevant for Tx queues. + * @flc: FD FLow Context structure + * @flc.value: Default FLC value for traffic dequeued from + * this queue. Please check description of FD + * structure for more information. + * Note that FLC values set using dpni_add_fs_entry, + * if any, take precedence over values per queue. + * @flc.stash_control: Boolean, indicates whether the 6 lowest + * - significant bits are used for stash control. + * significant bits are used for stash control. If set, the 6 + * least significant bits in value are interpreted as follows: + * - bits 0-1: indicates the number of 64 byte units of context + * that are stashed. FLC value is interpreted as a memory address + * in this case, excluding the 6 LS bits. + * - bits 2-3: indicates the number of 64 byte units of frame + * annotation to be stashed. Annotation is placed at FD[ADDR]. + * - bits 4-5: indicates the number of 64 byte units of frame + * data to be stashed. Frame data is placed at FD[ADDR] + + * FD[OFFSET]. + * For more details check the Frame Descriptor section in the + * hardware documentation. 
+ */ +struct dpni_queue { + struct { + u16 id; + enum dpni_dest type; + char hold_active; + u8 priority; + } destination; + u64 user_context; + struct { + u64 value; + char stash_control; + } flc; +}; + +/** + * struct dpni_queue_id - Queue identification, used for enqueue commands + * or queue control + * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant + * for Tx queues. + */ +struct dpni_queue_id { + u32 fqid; + u16 qdbin; +}; + +/** + * Set User Context + */ +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 +#define DPNI_QUEUE_OPT_DEST 0x00000002 +#define DPNI_QUEUE_OPT_FLC 0x00000004 +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008 + +int dpni_set_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + u8 options, + const struct dpni_queue *queue); + +int dpni_get_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_queue *queue, + struct dpni_queue_id *qid); + +/** + * enum dpni_congestion_unit - DPNI congestion units + * @DPNI_CONGESTION_UNIT_BYTES: bytes units + * @DPNI_CONGESTION_UNIT_FRAMES: frames units + */ +enum dpni_congestion_unit { + DPNI_CONGESTION_UNIT_BYTES = 0, + DPNI_CONGESTION_UNIT_FRAMES +}; + +/** + * enum dpni_congestion_point - Structure representing congestion point + * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and + * QUEUE_INDEX + * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to + * define the DPNI this can be either per TC (default) or per + * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create). + * QUEUE_INDEX is ignored if this type is used. + */ +enum dpni_congestion_point { + DPNI_CP_QUEUE, + DPNI_CP_GROUP, +}; + +/** + * struct dpni_taildrop - Structure representing the taildrop + * @enable: Indicates whether the taildrop is active or not. + * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports + * byte units, this field is ignored and assumed = 0 if + * CONGESTION_POINT is 0. + * @threshold: Threshold value, in units identified by UNITS field. Value 0 + * cannot be used as a valid taildrop threshold, THRESHOLD must + * be > 0 if the taildrop is enabled. + */ +struct dpni_taildrop { + char enable; + enum dpni_congestion_unit units; + u32 threshold; +}; + +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + u8 tc, + u8 q_index, + struct dpni_taildrop *taildrop); + +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + u8 tc, + u8 q_index, + struct dpni_taildrop *taildrop); + +/** + * struct dpni_rule_cfg - Rule configuration for table lookup + * @key_iova: I/O virtual address of the key (must be in DMA-able memory) + * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) + * @key_size: key and mask size (in bytes) + */ +struct dpni_rule_cfg { + u64 key_iova; + u64 mask_iova; + u8 key_size; +}; + +/** + * Discard matching traffic. If set, this takes precedence over any other + * configuration and matching traffic is always discarded. + */ + #define DPNI_FS_OPT_DISCARD 0x1 + +/** + * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to + * override the FLC value set per queue. 
+ * For more details check the Frame Descriptor section in the hardware + * documentation. + */ +#define DPNI_FS_OPT_SET_FLC 0x2 + +/** + * Indicates whether the 6 lowest significant bits of FLC are used for stash + * control. If set, the 6 least significant bits in value are interpreted as + * follows: + * - bits 0-1: indicates the number of 64 byte units of context that are + * stashed. FLC value is interpreted as a memory address in this case, + * excluding the 6 LS bits. + * - bits 2-3: indicates the number of 64 byte units of frame annotation + * to be stashed. Annotation is placed at FD[ADDR]. + * - bits 4-5: indicates the number of 64 byte units of frame data to be + * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET]. + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified. + */ +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4 + +/** + * struct dpni_fs_action_cfg - Action configuration for table look-up + * @flc: FLC value for traffic matching this rule. Please check the + * Frame Descriptor section in the hardware documentation for + * more information. + * @flow_id: Identifies the Rx queue used for matching traffic. Supported + * values are in range 0 to num_queue-1. + * @options: Any combination of DPNI_FS_OPT_ values. + */ +struct dpni_fs_action_cfg { + u64 flc; + u16 flow_id; + u16 options; +}; + +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + u16 index, + const struct dpni_rule_cfg *cfg, + const struct dpni_fs_action_cfg *action); + +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rule_cfg *cfg); + +int dpni_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver); + +#endif /* __FSL_DPNI_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h new file mode 100644 index 000000000000..9af4ac71f347 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2018 NXP + */ + +#ifndef _FSL_DPRTC_CMD_H +#define _FSL_DPRTC_CMD_H + +/* Command versioning */ +#define DPRTC_CMD_BASE_VERSION 1 +#define DPRTC_CMD_ID_OFFSET 4 + +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800) +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810) + +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1) +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2) +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3) +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4) + +#pragma pack(push, 1) +struct dprtc_cmd_open { + __le32 dprtc_id; +}; + +struct dprtc_get_freq_compensation { + __le32 freq_compensation; +}; + +struct dprtc_time { + __le64 time; +}; + +#pragma pack(pop) + +#endif /* _FSL_DPRTC_CMD_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c new file mode 100644 index 000000000000..c13e09bc7b9d --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2018 NXP + */ + +#include <linux/fsl/mc.h> + +#include "dprtc.h" +#include "dprtc-cmd.h" + +/** + * dprtc_open() - Open a control session for the specified object. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dprtc_id: DPRTC unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dprtc_create function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dprtc_id, + u16 *token) +{ + struct dprtc_cmd_open *cmd_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dprtc_cmd_open *)cmd.params; + cmd_params->dprtc_id = cpu_to_le32(dprtc_id); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dprtc_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags, + token); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_set_freq_compensation() - Sets a new frequency compensation value. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: The new frequency compensation value to set. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 freq_compensation) +{ + struct dprtc_get_freq_compensation *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION, + cmd_flags, + token); + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params; + cmd_params->freq_compensation = cpu_to_le32(freq_compensation); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: Frequency compensation value + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 *freq_compensation) +{ + struct dprtc_get_freq_compensation *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION, + cmd_flags, + token); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params; + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation); + + return 0; +} + +/** + * dprtc_get_time() - Returns the current RTC time. 
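The four runtime DPRTC commands map naturally onto a PTP clock driver's gettime/settime/adjfreq callbacks. As an adjtime-style illustration (the example_rtc context and its field names are assumptions, not part of this patch):

struct example_rtc {
        struct fsl_mc_io *mc_io;
        u16 rtc_token;          /* token returned by dprtc_open() */
};

static int example_rtc_adjtime(struct example_rtc *rtc, s64 delta_ns)
{
        u64 now;
        int err;

        err = dprtc_get_time(rtc->mc_io, 0, rtc->rtc_token, &now);
        if (err)
                return err;

        now += delta_ns;

        return dprtc_set_time(rtc->mc_io, 0, rtc->rtc_token, now);
}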
+ * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: Current RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t *time) +{ + struct dprtc_time *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME, + cmd_flags, + token); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dprtc_time *)cmd.params; + *time = le64_to_cpu(rsp_params->time); + + return 0; +} + +/** + * dprtc_set_time() - Updates current RTC time. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: New RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t time) +{ + struct dprtc_time *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME, + cmd_flags, + token); + cmd_params = (struct dprtc_time *)cmd.params; + cmd_params->time = cpu_to_le64(time); + + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h new file mode 100644 index 000000000000..fe19618d6cdf --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2018 NXP + */ + +#ifndef __FSL_DPRTC_H +#define __FSL_DPRTC_H + +/* Data Path Real Time Counter API + * Contains initialization APIs and runtime control APIs for RTC + */ + +struct fsl_mc_io; + +int dprtc_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dprtc_id, + u16 *token); + +int dprtc_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 freq_compensation); + +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 *freq_compensation); + +int dprtc_get_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t *time); + +int dprtc_set_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t time); + +#endif /* __FSL_DPRTC_H */ diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 4778b663653e..bf80855dd0dd 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -452,6 +452,10 @@ struct bufdesc_ex { * initialisation. */ #define FEC_QUIRK_MIB_CLEAR (1 << 15) +/* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers, + * those FIFO receive registers are resolved in other platforms. 
+ */ +#define FEC_QUIRK_HAS_FRREG (1 << 16) struct bufdesc_prop { int qid; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index bf9b9fd6d2a0..6db69ba30dcd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = { .driver_data = 0, }, { .name = "imx25-fec", - .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR, + .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | + FEC_QUIRK_HAS_FRREG, }, { .name = "imx27-fec", - .driver_data = FEC_QUIRK_MIB_CLEAR, + .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG, }, { .name = "imx28-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | - FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC, + FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_FRREG, }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | @@ -1946,16 +1948,15 @@ static int fec_enet_mii_probe(struct net_device *ndev) /* mask with MAC supported features */ if (fep->quirks & FEC_QUIRK_HAS_GBIT) { - phy_dev->supported &= PHY_GBIT_FEATURES; - phy_dev->supported &= ~SUPPORTED_1000baseT_Half; + phy_set_max_speed(phy_dev, 1000); + phy_remove_link_mode(phy_dev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); #if !defined(CONFIG_M5272) - phy_dev->supported |= SUPPORTED_Pause; + phy_support_sym_pause(phy_dev); #endif } else - phy_dev->supported &= PHY_BASIC_FEATURES; - - phy_dev->advertising = phy_dev->supported; + phy_set_max_speed(phy_dev, 100); fep->link = 0; fep->full_duplex = 0; @@ -2055,8 +2056,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) node = of_get_child_by_name(pdev->dev.of_node, "mdio"); err = of_mdiobus_register(fep->mii_bus, node); - if (node) - of_node_put(node); + of_node_put(node); if (err) goto err_out_free_mdiobus; @@ -2164,7 +2164,13 @@ static void fec_enet_get_regs(struct net_device *ndev, memset(buf, 0, regs->len); for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { - off = fec_enet_register_offset[i] / 4; + off = fec_enet_register_offset[i]; + + if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && + !(fep->quirks & FEC_QUIRK_HAS_FRREG)) + continue; + + off >>= 2; buf[off] = readl(&theregs[off]); } } @@ -2230,13 +2236,8 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; - if (pause->rx_pause || pause->autoneg) { - ndev->phydev->supported |= ADVERTISED_Pause; - ndev->phydev->advertising |= ADVERTISED_Pause; - } else { - ndev->phydev->supported &= ~ADVERTISED_Pause; - ndev->phydev->advertising &= ~ADVERTISED_Pause; - } + phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, + pause->autoneg); if (pause->autoneg) { if (netif_running(ndev)) diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 6d7269d87a85..b90bab72efdb 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -305,7 +305,8 @@ static int mpc52xx_fec_close(struct net_device *dev) * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. 
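Several hunks in this series replace open-coded manipulation of phydev->supported/advertising with the dedicated phylib helpers (phy_set_max_speed(), phy_remove_link_mode(), phy_support_sym_pause(), ...). A generic sketch of the resulting idiom, not tied to any one driver here:

static void example_limit_phy(struct phy_device *phydev, bool gigabit)
{
        if (gigabit) {
                phy_set_max_speed(phydev, SPEED_1000);
                /* the MAC cannot do half-duplex gigabit */
                phy_remove_link_mode(phydev,
                                     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
                /* let the PHY advertise symmetric pause frames */
                phy_support_sym_pause(phydev);
        } else {
                phy_set_max_speed(phydev, SPEED_100);
        }
}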
*/ -static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); struct bcom_fec_bd *bd; diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index a847b9c3b31a..d79e4e009d63 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -393,11 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, */ /* get local capabilities */ - lcl_adv = 0; - if (phy_dev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (phy_dev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; + lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising); /* get link partner capabilities */ rmt_adv = 0; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 2c2976a2dda6..7c548ed535da 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -481,7 +481,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, } #endif -static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); cbd_t __iomem *bdp; diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index ac2c3f6a12bc..82722d05fedb 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -446,8 +446,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) goto error; } - snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name, - (unsigned long long)res.start); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%pOFn@%llx", np, + (unsigned long long)res.start); priv->map = of_iomap(np, 0); if (!priv->map) { diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index f27f9bae1a4a..3c8da1a18ba0 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -102,8 +102,6 @@ #include <linux/phy_fixed.h> #include <linux/of.h> #include <linux/of_net.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> #include "gianfar.h" @@ -112,7 +110,7 @@ const char gfar_driver_version[] = "2.0"; static int gfar_enet_open(struct net_device *dev); -static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); @@ -1814,8 +1812,8 @@ static int init_phy(struct net_device *dev) phydev->supported &= (GFAR_SUPPORTED | gigabit_support); phydev->advertising = phydev->supported; - /* Add support for flow control, but don't advertise it by default */ - phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + /* Add support for flow control */ + phy_support_asym_pause(phydev); /* disable EEE autoneg, EEE not supported by eTSEC */ memset(&edata, 0, sizeof(struct ethtool_eee)); @@ -2334,7 +2332,7 @@ static inline bool gfar_csum_errata_76(struct gfar_private *priv, /* This is called by the kernel when a frame is ready for transmission. 
* It is pointed to by the dev->hard_start_xmit function pointer */ -static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_tx_q *tx_queue = NULL; @@ -3658,12 +3656,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = 0; - if (phydev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (phydev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; - + lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) val |= MACCFG1_TX_FLOW; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 395a5266ea30..0d76e15cd6dd 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -230,7 +230,7 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, /* Make sure we return a number greater than 0 * if usecs > 0 */ - return (usecs * 1000 + count - 1) / count; + return DIV_ROUND_UP(usecs * 1000, count); } /* Convert ethernet clock ticks to microseconds */ @@ -503,65 +503,44 @@ static int gfar_spauseparam(struct net_device *dev, struct gfar_private *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 oldadv, newadv; if (!phydev) return -ENODEV; - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - (epause->rx_pause != epause->tx_pause))) + if (!phy_validate_pause(phydev, epause)) return -EINVAL; priv->rx_pause_en = priv->tx_pause_en = 0; + phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); if (epause->rx_pause) { priv->rx_pause_en = 1; if (epause->tx_pause) { priv->tx_pause_en = 1; - /* FLOW_CTRL_RX & TX */ - newadv = ADVERTISED_Pause; - } else /* FLOW_CTLR_RX */ - newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; + } } else if (epause->tx_pause) { priv->tx_pause_en = 1; - /* FLOW_CTLR_TX */ - newadv = ADVERTISED_Asym_Pause; - } else - newadv = 0; + } if (epause->autoneg) priv->pause_aneg_en = 1; else priv->pause_aneg_en = 0; - oldadv = phydev->advertising & - (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (oldadv != newadv) { - phydev->advertising &= - ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - phydev->advertising |= newadv; - if (phydev->autoneg) - /* inform link partner of our - * new flow ctrl settings - */ - return phy_start_aneg(phydev); - - if (!epause->autoneg) { - u32 tempval; - tempval = gfar_read(®s->maccfg1); - tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); - - priv->tx_actual_en = 0; - if (priv->tx_pause_en) { - priv->tx_actual_en = 1; - tempval |= MACCFG1_TX_FLOW; - } + if (!epause->autoneg) { + u32 tempval = gfar_read(®s->maccfg1); - if (priv->rx_pause_en) - tempval |= MACCFG1_RX_FLOW; - gfar_write(®s->maccfg1, tempval); + tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + + priv->tx_actual_en = 0; + if (priv->tx_pause_en) { + priv->tx_actual_en = 1; + tempval |= MACCFG1_TX_FLOW; } + + if (priv->rx_pause_en) + tempval |= MACCFG1_RX_FLOW; + gfar_write(®s->maccfg1, tempval); } return 0; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 22a817da861e..32e02700feaa 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c 
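Both the fman and gianfar hunks above collapse the ADVERTISED_Pause/ADVERTISED_Asym_Pause translation into ethtool_adv_to_lcl_adv_t(); gianfar_ethtool additionally swaps the open-coded round-up for DIV_ROUND_UP(usecs * 1000, count), which expands to (usecs * 1000 + count - 1) / count. A hedged sketch of the pause-resolution pattern (my_resolve_pause() is a placeholder; phydev->advertising is assumed to still be the legacy u32 bitmap at this point in the series):

#include <linux/mii.h>
#include <linux/phy.h>

static u8 my_resolve_pause(struct phy_device *phydev)
{
	u16 lcl_adv, rmt_adv = 0;

	/* link partner capabilities, as reported by phylib */
	if (phydev->pause)
		rmt_adv |= LPA_PAUSE_CAP;
	if (phydev->asym_pause)
		rmt_adv |= LPA_PAUSE_ASYM;

	/* ADVERTISED_Pause/Asym_Pause -> ADVERTISE_PAUSE_CAP/ASYM */
	lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);

	/* returns a FLOW_CTRL_TX / FLOW_CTRL_RX bitmask */
	return mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
}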
+++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1742,12 +1742,7 @@ static int init_phy(struct net_device *dev) if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) uec_configure_serdes(dev); - phydev->supported &= (SUPPORTED_MII | - SUPPORTED_Autoneg | - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full); + phy_set_max_speed(phydev, SPEED_100); if (priv->max_speed == SPEED_1000) phydev->supported |= ADVERTISED_1000baseT_Full; @@ -3083,7 +3078,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* This is called by the kernel when a frame is ready for transmission. */ /* It is pointed to by the dev->hard_start_xmit function pointer */ -static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ucc_geth_private *ugeth = netdev_priv(dev); #ifdef CONFIG_UGETH_TX_ON_DEMAND diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 14374a856d30..be268dcde8fa 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -422,7 +422,8 @@ static void hip04_start_tx_timer(struct hip04_priv *priv) ns, HRTIMER_MODE_REL); } -static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct hip04_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index c5727003af8c..471805ea363b 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -736,7 +736,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv, return 0; } -static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct hix5hd2_priv *priv = netdev_priv(dev); struct hix5hd2_desc *desc; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 09e4061d1fa6..aaf72c055711 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -319,7 +319,7 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en) hns_gmac_set_uc_match(mac_drv, en); } -int hns_gmac_wait_fifo_clean(void *mac_drv) +static int hns_gmac_wait_fifo_clean(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; int wait_cnt; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 6ed6f142427e..3613e400e816 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -837,8 +837,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) */ put_device(&mac_cb->phy_dev->mdio.dev); - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, np->name); + dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n", + mac_cb->mac_id, np); } of_node_put(np); @@ -855,8 +855,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) * if the phy_dev is found */ put_device(&mac_cb->phy_dev->mdio.dev); - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, np->name); + dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n", + mac_cb->mac_id, np); } 
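The fsl_pq_mdio and hns_dsaf_mac hunks switch from dereferencing np->name to the %pOFn printf specifier, which keeps working as the name pointer is phased out of struct device_node. A minimal illustration (my_print_phy_node() is a placeholder):

#include <linux/device.h>
#include <linux/of.h>

static void my_print_phy_node(struct device *dev, struct device_node *np)
{
	/* %pOFn prints the node name; %pOF would print the full path */
	dev_dbg(dev, "phy_node: %pOFn\n", np);
}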
of_node_put(np); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index be9dc08ccf67..038326cfda93 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -46,9 +46,6 @@ enum hclge_mbx_mac_vlan_subcode { HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ - HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */ - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */ - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */ }; /* below are per-VF vlan cfg subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index fff5be8078ac..781e5dee3c70 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -29,8 +29,8 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, return false; } -static void hnae3_set_client_init_flag(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, int inited) +void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, int inited) { switch (client->type) { case HNAE3_CLIENT_KNIC: @@ -46,6 +46,7 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client, break; } } +EXPORT_SYMBOL(hnae3_set_client_init_flag); static int hnae3_get_client_init_flag(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) @@ -86,14 +87,11 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, /* now, (un-)instantiate client by calling lower layer */ if (is_reg) { ret = ae_dev->ops->init_client_instance(client, ae_dev); - if (ret) { + if (ret) dev_err(&ae_dev->pdev->dev, "fail to instantiate client, ret = %d\n", ret); - return ret; - } - hnae3_set_client_init_flag(client, ae_dev, 1); - return 0; + return ret; } if (hnae3_get_client_init_flag(client, ae_dev)) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 67befff0bfc5..e82e4ca20620 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -51,6 +51,7 @@ #define HNAE3_KNIC_CLIENT_INITED_B 0x3 #define HNAE3_UNIC_CLIENT_INITED_B 0x4 #define HNAE3_ROCE_CLIENT_INITED_B 0x5 +#define HNAE3_DEV_SUPPORT_FD_B 0x6 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) @@ -61,6 +62,9 @@ #define hnae3_dev_dcb_supported(hdev) \ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) +#define hnae3_dev_fd_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B) + #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) #define ring_ptr_move_bw(ring, p) \ @@ -84,10 +88,11 @@ struct hnae3_queue { /*hnae3 loop mode*/ enum hnae3_loop { - HNAE3_MAC_INTER_LOOP_MAC, - HNAE3_MAC_INTER_LOOP_SERDES, - HNAE3_MAC_INTER_LOOP_PHY, - HNAE3_MAC_LOOP_NONE, + HNAE3_LOOP_APP, + HNAE3_LOOP_SERIAL_SERDES, + HNAE3_LOOP_PARALLEL_SERDES, + HNAE3_LOOP_PHY, + HNAE3_LOOP_NONE, }; enum hnae3_client_type { @@ -107,6 +112,7 @@ enum hnae3_media_type { HNAE3_MEDIA_TYPE_FIBER, HNAE3_MEDIA_TYPE_COPPER, HNAE3_MEDIA_TYPE_BACKPLANE, + HNAE3_MEDIA_TYPE_NONE, }; enum hnae3_reset_notify_type { @@ -173,6 +179,7 @@ struct hnae3_ae_dev { struct list_head node; u32 flag; enum hnae3_dev_type dev_type; + enum hnae3_reset_type reset_type; void 
*priv; }; @@ -337,6 +344,8 @@ struct hnae3_ae_ops { void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); int (*set_mac_addr)(struct hnae3_handle *handle, void *p, bool is_first); + int (*do_ioctl)(struct hnae3_handle *handle, + struct ifreq *ifr, int cmd); int (*add_uc_addr)(struct hnae3_handle *handle, const unsigned char *addr); int (*rm_uc_addr)(struct hnae3_handle *handle, @@ -346,8 +355,6 @@ struct hnae3_ae_ops { const unsigned char *addr); int (*rm_mc_addr)(struct hnae3_handle *handle, const unsigned char *addr); - int (*update_mta_status)(struct hnae3_handle *handle); - void (*set_tso_stats)(struct hnae3_handle *handle, int enable); void (*update_stats)(struct hnae3_handle *handle, struct net_device_stats *net_stats); @@ -395,11 +402,11 @@ struct hnae3_ae_ops { int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto); int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); - void (*reset_event)(struct hnae3_handle *handle); + void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle); void (*get_channels)(struct hnae3_handle *handle, struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, - u16 *free_tqps, u16 *max_rss_size); + u16 *alloc_tqps, u16 *max_rss_size); int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num); void (*get_flowctrl_adv)(struct hnae3_handle *handle, u32 *flowctrl_adv); @@ -408,7 +415,21 @@ struct hnae3_ae_ops { void (*get_link_mode)(struct hnae3_handle *handle, unsigned long *supported, unsigned long *advertising); - void (*get_port_type)(struct hnae3_handle *handle, u8 *port_type); + int (*add_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*del_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + void (*del_all_fd_entries)(struct hnae3_handle *handle, + bool clear_list); + int (*get_fd_rule_cnt)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_rule_info)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_all_rules)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs); + int (*restore_fd_rules)(struct hnae3_handle *handle); + void (*enable_fd)(struct hnae3_handle *handle, bool enable); + pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev); }; struct hnae3_dcb_ops { @@ -459,6 +480,7 @@ struct hnae3_knic_private_info { const struct hnae3_dcb_ops *dcb_ops; u16 int_rl_setting; + enum pkt_hash_types rss_type; }; struct hnae3_roce_private_info { @@ -476,10 +498,20 @@ struct hnae3_unic_private_info { struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ }; -#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0) +#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0) #define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1) -#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2) +#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2) #define HNAE3_SUPPORT_VF BIT(3) +#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4) + +#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */ +#define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */ +#define HNAE3_BPE BIT(2) /* broadcast promisc enable */ +#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */ +#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */ +#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */ +#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE) +#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE) struct hnae3_handle { struct hnae3_client *client; 
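For reference, the new HNAE3_UPE/HNAE3_MPE masks above are each an OR of the user-requested bit and the MAC-VLAN-table-overflow bit, and they decode into the two booleans that the set_promisc_mode() ae op takes. A sketch of that decoding (my_decode_promisc() is a placeholder; the macros come from the hnae3.h hunk above):

#include "hnae3.h"	/* HNAE3_UPE / HNAE3_MPE from the hunk above */

static void my_decode_promisc(u8 netdev_flags, bool *uc_promisc,
			      bool *mc_promisc)
{
	/* unicast promisc: requested by the user or forced by table overflow */
	*uc_promisc = !!(netdev_flags & HNAE3_UPE);
	/* multicast promisc: same two sources */
	*mc_promisc = !!(netdev_flags & HNAE3_MPE);
}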
@@ -499,6 +531,8 @@ struct hnae3_handle { }; u32 numa_node_mask; /* for multi-chip support */ + + u8 netdev_flags; }; #define hnae3_set_field(origin, mask, shift, val) \ @@ -521,4 +555,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); void hnae3_unregister_client(struct hnae3_client *client); int hnae3_register_client(struct hnae3_client *client); + +void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, int inited); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 955c4ab18b03..32f3aca814e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -9,6 +9,7 @@ #include <linux/ipv6.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/aer.h> #include <linux/skbuff.h> #include <linux/sctp.h> #include <linux/vermagic.h> @@ -21,6 +22,7 @@ static void hns3_clear_all_ring(struct hnae3_handle *h); static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); +static void hns3_remove_hw_addr(struct net_device *netdev); static const char hns3_driver_name[] = "hns3"; const char hns3_driver_version[] = VERMAGIC_STRING; @@ -66,6 +68,23 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector) return IRQ_HANDLED; } +/* This callback function is used to set affinity changes to the irq affinity + * masks when the irq_set_affinity_notifier function is used. + */ +static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct hns3_enet_tqp_vector *tqp_vectors = + container_of(notify, struct hns3_enet_tqp_vector, + affinity_notify); + + tqp_vectors->affinity_mask = *mask; +} + +static void hns3_nic_irq_affinity_release(struct kref *ref) +{ +} + static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) { struct hns3_enet_tqp_vector *tqp_vectors; @@ -77,6 +96,10 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) continue; + /* clear the affinity notifier and affinity mask */ + irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL); + irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); + /* release the irq resource */ free_irq(tqp_vectors->vector_irq, tqp_vectors); tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; @@ -127,6 +150,15 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) return ret; } + tqp_vectors->affinity_notify.notify = + hns3_nic_irq_affinity_notify; + tqp_vectors->affinity_notify.release = + hns3_nic_irq_affinity_release; + irq_set_affinity_notifier(tqp_vectors->vector_irq, + &tqp_vectors->affinity_notify); + irq_set_affinity_hint(tqp_vectors->vector_irq, + &tqp_vectors->affinity_mask); + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; } @@ -195,8 +227,6 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, struct hns3_nic_priv *priv) { - struct hnae3_handle *h = priv->ae_handle; - /* initialize the configuration for interrupt coalescing. * 1. GL (Interrupt Gap Limiter) * 2. 
RL (Interrupt Rate Limiter) @@ -209,9 +239,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; - /* Default: disable RL */ - h->kinfo.int_rl_setting = 0; - tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; @@ -277,12 +304,12 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) static u16 hns3_get_max_available_channels(struct hnae3_handle *h) { - u16 free_tqps, max_rss_size, max_tqps; + u16 alloc_tqps, max_rss_size, rss_size; - h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); - max_tqps = h->kinfo.num_tc * max_rss_size; + h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); + rss_size = alloc_tqps / h->kinfo.num_tc; - return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); + return min_t(u16, rss_size, max_rss_size); } static int hns3_nic_net_up(struct net_device *netdev) @@ -433,26 +460,81 @@ static int hns3_nic_mc_unsync(struct net_device *netdev, return 0; } +static u8 hns3_get_netdev_flags(struct net_device *netdev) +{ + u8 flags = 0; + + if (netdev->flags & IFF_PROMISC) { + flags = HNAE3_USER_UPE | HNAE3_USER_MPE; + } else { + flags |= HNAE3_VLAN_FLTR; + if (netdev->flags & IFF_ALLMULTI) + flags |= HNAE3_USER_MPE; + } + + return flags; +} + static void hns3_nic_set_rx_mode(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); + u8 new_flags; + int ret; - if (h->ae_algo->ops->set_promisc_mode) { - if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, true, true); - else if (netdev->flags & IFF_ALLMULTI) - h->ae_algo->ops->set_promisc_mode(h, false, true); - else - h->ae_algo->ops->set_promisc_mode(h, false, false); - } - if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) + new_flags = hns3_get_netdev_flags(netdev); + + ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); + if (ret) { netdev_err(netdev, "sync uc address fail\n"); + if (ret == -ENOSPC) + new_flags |= HNAE3_OVERFLOW_UPE; + } + if (netdev->flags & IFF_MULTICAST) { - if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) + ret = __dev_mc_sync(netdev, hns3_nic_mc_sync, + hns3_nic_mc_unsync); + if (ret) { netdev_err(netdev, "sync mc address fail\n"); + if (ret == -ENOSPC) + new_flags |= HNAE3_OVERFLOW_MPE; + } + } - if (h->ae_algo->ops->update_mta_status) - h->ae_algo->ops->update_mta_status(h); + hns3_update_promisc_mode(netdev, new_flags); + /* User mode Promisc mode enable and vlan filtering is disabled to + * let all packets in. MAC-VLAN Table overflow Promisc enabled and + * vlan fitering is enabled + */ + hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); + h->netdev_flags = new_flags; +} + +void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->set_promisc_mode) { + h->ae_algo->ops->set_promisc_mode(h, + promisc_flags & HNAE3_UPE, + promisc_flags & HNAE3_MPE); + } +} + +void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + bool last_state; + + if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) { + last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? 
true : false; + if (enable != last_state) { + netdev_info(netdev, + "%s vlan filter\n", + enable ? "enable" : "disable"); + h->ae_algo->ops->enable_vlan_filter(h, enable); + } } } @@ -896,35 +978,28 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, } static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) + int size, int frag_end, enum hns_desc_type type) { struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + struct device *dev = ring_to_dev(ring); u32 ol_type_vlan_len_msec = 0; u16 bdtp_fe_sc_vld_ra_ri = 0; + struct skb_frag_struct *frag; + unsigned int frag_buf_num; u32 type_cs_vlan_tso = 0; struct sk_buff *skb; u16 inner_vtag = 0; u16 out_vtag = 0; + unsigned int k; + int sizeoflast; u32 paylen = 0; + dma_addr_t dma; u16 mss = 0; u8 ol4_proto; u8 il4_proto; int ret; - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->length = size; - desc_cb->dma = dma; - desc_cb->type = type; - - /* now, fill the descriptor */ - desc->addr = cpu_to_le64(dma); - desc->tx.send_size = cpu_to_le16((u16)size); - hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); - desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); - if (type == DESC_TYPE_SKB) { skb = (struct sk_buff *)priv; paylen = skb->len; @@ -965,38 +1040,47 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, desc->tx.mss = cpu_to_le16(mss); desc->tx.vlan_tag = cpu_to_le16(inner_vtag); desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); - } - /* move ring pointer to next.*/ - ring_ptr_move_fw(ring, next_to_use); + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + } else { + frag = (struct skb_frag_struct *)priv; + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + } - return 0; -} + if (dma_mapping_error(ring->dev, dma)) { + ring->stats.sw_err_cnt++; + return -ENOMEM; + } -static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) -{ - unsigned int frag_buf_num; - unsigned int k; - int sizeoflast; - int ret; + desc_cb->length = size; frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; sizeoflast = size % HNS3_MAX_BD_SIZE; sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; - /* When the frag size is bigger than hardware, split this frag */ + /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { - ret = hns3_fill_desc(ring, priv, - (k == frag_buf_num - 1) ? - sizeoflast : HNS3_MAX_BD_SIZE, - dma + HNS3_MAX_BD_SIZE * k, - frag_end && (k == frag_buf_num - 1) ? 1 : 0, - (type == DESC_TYPE_SKB && !k) ? - DESC_TYPE_SKB : DESC_TYPE_PAGE); - if (ret) - return ret; + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ + desc_cb->priv = priv; + desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; + desc_cb->type = (type == DESC_TYPE_SKB && !k) ? + DESC_TYPE_SKB : DESC_TYPE_PAGE; + + /* now, fill the descriptor */ + desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); + desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? + (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, + frag_end && (k == frag_buf_num - 1) ? 
+ 1 : 0); + desc->tx.bdtp_fe_sc_vld_ra_ri = + cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + /* move ring pointer to next.*/ + ring_ptr_move_fw(ring, next_to_use); + + desc_cb = &ring->desc_cb[ring->next_to_use]; + desc = &ring->desc[ring->next_to_use]; } return 0; @@ -1044,7 +1128,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, /* No. of segments (plus a header) */ buf_num = skb_shinfo(skb)->nr_frags + 1; - if (buf_num > ring_space(ring)) + if (unlikely(ring_space(ring) < buf_num)) return -EBUSY; *bnum = buf_num; @@ -1052,7 +1136,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, return 0; } -static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) +static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) { struct device *dev = ring_to_dev(ring); unsigned int i; @@ -1068,12 +1152,14 @@ static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) ring->desc_cb[ring->next_to_use].dma, ring->desc_cb[ring->next_to_use].length, DMA_TO_DEVICE); - else + else if (ring->desc_cb[ring->next_to_use].length) dma_unmap_page(dev, ring->desc_cb[ring->next_to_use].dma, ring->desc_cb[ring->next_to_use].length, DMA_TO_DEVICE); + ring->desc_cb[ring->next_to_use].length = 0; + /* rollback one */ ring_ptr_move_bw(ring, next_to_use); } @@ -1085,12 +1171,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) struct hns3_nic_ring_data *ring_data = &tx_ring_data(priv, skb->queue_mapping); struct hns3_enet_ring *ring = ring_data->ring; - struct device *dev = priv->dev; struct netdev_queue *dev_queue; struct skb_frag_struct *frag; int next_to_use_head; int next_to_use_frag; - dma_addr_t dma; int buf_num; int seg_num; int size; @@ -1125,35 +1209,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX head DMA map failed\n"); - ring->stats.sw_err_cnt++; - goto out_err_tx_ok; - } - - ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, - DESC_TYPE_SKB); + ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, + DESC_TYPE_SKB); if (ret) - goto head_dma_map_err; + goto head_fill_err; next_to_use_frag = ring->next_to_use; /* Fill the fragments */ for (i = 1; i < seg_num; i++) { frag = &skb_shinfo(skb)->frags[i - 1]; size = skb_frag_size(frag); - dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); - ring->stats.sw_err_cnt++; - goto frag_dma_map_err; - } - ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, - seg_num - 1 == i ? 1 : 0, - DESC_TYPE_PAGE); + + ret = priv->ops.fill_desc(ring, frag, size, + seg_num - 1 == i ? 
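The merged fill_desc path above carves each mapped buffer into hardware-sized BDs, where only the last BD may be short. The length arithmetic, isolated into a standalone sketch (MY_MAX_BD_SIZE is a placeholder constant standing in for the driver's HNS3_MAX_BD_SIZE):

#include <linux/kernel.h>

#define MY_MAX_BD_SIZE	65535U	/* placeholder for the hardware BD limit */

static unsigned int my_bd_len(unsigned int size, unsigned int k)
{
	unsigned int nbds = DIV_ROUND_UP(size, MY_MAX_BD_SIZE);
	unsigned int last = size % MY_MAX_BD_SIZE ?: MY_MAX_BD_SIZE;

	/* BDs 0..nbds-2 are full sized; BD nbds-1 carries the remainder */
	return (k == nbds - 1) ? last : MY_MAX_BD_SIZE;
}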
1 : 0, + DESC_TYPE_PAGE); if (ret) - goto frag_dma_map_err; + goto frag_fill_err; } /* Complete translate all packets */ @@ -1166,11 +1238,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; -frag_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_frag); +frag_fill_err: + hns3_clear_desc(ring, next_to_use_frag); -head_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_head); +head_fill_err: + hns3_clear_desc(ring, next_to_use_head); out_err_tx_ok: dev_kfree_skb_any(skb); @@ -1209,6 +1281,20 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return 0; } +static int hns3_nic_do_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->do_ioctl) + return -EOPNOTSUPP; + + return h->ae_algo->ops->do_ioctl(h, ifr, cmd); +} + static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t features) { @@ -1218,13 +1304,10 @@ static int hns3_nic_set_features(struct net_device *netdev, int ret; if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; + else priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } } if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && @@ -1246,6 +1329,13 @@ static int hns3_nic_set_features(struct net_device *netdev, return ret; } + if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { + if (features & NETIF_F_NTUPLE) + h->ae_algo->ops->enable_fd(h, true); + else + h->ae_algo->ops->enable_fd(h, false); + } + netdev->features = features; return 0; } @@ -1447,13 +1537,11 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) } ret = h->ae_algo->ops->set_mtu(h, new_mtu); - if (ret) { + if (ret) netdev_err(netdev, "failed to change MTU in hardware %d\n", ret); - return ret; - } - - netdev->mtu = new_mtu; + else + netdev->mtu = new_mtu; /* if the netdev was running earlier, bring it up again */ if (if_running && hns3_nic_net_open(netdev)) @@ -1526,7 +1614,7 @@ static void hns3_nic_net_timeout(struct net_device *ndev) /* request the reset */ if (h->ae_algo->ops->reset_event) - h->ae_algo->ops->reset_event(h); + h->ae_algo->ops->reset_event(h->pdev, h); } static const struct net_device_ops hns3_nic_netdev_ops = { @@ -1535,6 +1623,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_start_xmit = hns3_nic_net_xmit, .ndo_tx_timeout = hns3_nic_net_timeout, .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_do_ioctl = hns3_nic_do_ioctl, .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, .ndo_get_stats64 = hns3_nic_get_stats64, @@ -1584,6 +1673,13 @@ static void hns3_disable_sriov(struct pci_dev *pdev) pci_disable_sriov(pdev); } +static void hns3_get_dev_capability(struct pci_dev *pdev, + struct hnae3_ae_dev *ae_dev) +{ + if (pdev->revision >= 0x21) + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); +} + /* hns3_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in hns3_pci_tbl @@ -1609,6 +1705,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ae_dev->pdev = pdev; ae_dev->flag = ent->driver_data; ae_dev->dev_type = HNAE3_DEV_KNIC; + ae_dev->reset_type = HNAE3_NONE_RESET; + 
hns3_get_dev_capability(pdev, ae_dev); pci_set_drvdata(pdev, ae_dev); hnae3_register_ae_dev(ae_dev); @@ -1662,12 +1760,72 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) return 0; } +static void hns3_shutdown(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + devm_kfree(&pdev->dev, ae_dev); + pci_set_drvdata(pdev, NULL); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + +static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + pci_ers_result_t ret; + + dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (!ae_dev) { + dev_err(&pdev->dev, + "Can't recover - error happened during device init\n"); + return PCI_ERS_RESULT_NONE; + } + + if (ae_dev->ops->process_hw_error) + ret = ae_dev->ops->process_hw_error(ae_dev); + else + return PCI_ERS_RESULT_NONE; + + return ret; +} + +static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + dev_info(dev, "requesting reset due to PCI error\n"); + + /* request the reset */ + if (ae_dev->ops->reset_event) { + ae_dev->ops->reset_event(pdev, NULL); + return PCI_ERS_RESULT_RECOVERED; + } + + return PCI_ERS_RESULT_DISCONNECT; +} + +static const struct pci_error_handlers hns3_err_handler = { + .error_detected = hns3_error_detected, + .slot_reset = hns3_slot_reset, +}; + static struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, + .shutdown = hns3_shutdown, .sriov_configure = hns3_pci_sriov_configure, + .err_handler = &hns3_err_handler, }; /* set default feature to hns3 */ @@ -1682,7 +1840,7 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; @@ -1694,24 +1852,30 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; - if (pdev->revision != 0x20) + if (pdev->revision >= 0x21) { netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + if (!(h->flags & HNAE3_SUPPORT_VF)) { + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->features |= NETIF_F_NTUPLE; + } + } } static int hns3_alloc_buffer(struct 
hns3_enet_ring *ring, @@ -1749,7 +1913,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, cb->length, ring_to_dma_dir(ring)); - if (dma_mapping_error(ring_to_dev(ring), cb->dma)) + if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) return -EIO; return 0; @@ -1761,7 +1925,7 @@ static void hns3_unmap_buffer(struct hns3_enet_ring *ring, if (cb->type == DESC_TYPE_SKB) dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); - else + else if (cb->length) dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); } @@ -1912,9 +2076,10 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) return u > c ? (h > c && h <= u) : (h > c || h <= u); } -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) +void hns3_clean_tx_ring(struct hns3_enet_ring *ring) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); struct netdev_queue *dev_queue; int bytes, pkts; int head; @@ -1923,7 +2088,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) rmb(); /* Make sure head is ready before touch any data */ if (is_ring_empty(ring) || head == ring->next_to_clean) - return true; /* no data to poll */ + return; /* no data to poll */ if (unlikely(!is_valid_clean_head(ring, head))) { netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, @@ -1932,16 +2097,15 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) u64_stats_update_begin(&ring->syncp); ring->stats.io_err_cnt++; u64_stats_update_end(&ring->syncp); - return true; + return; } bytes = 0; pkts = 0; - while (head != ring->next_to_clean && budget) { + while (head != ring->next_to_clean) { hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); /* Issue prefetch for next Tx descriptor */ prefetch(&ring->desc_cb[ring->next_to_clean]); - budget--; } ring->tqp_vector->tx_group.total_bytes += bytes; @@ -1961,13 +2125,12 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) * sees the new next_to_clean. 
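The buffer-mapping tweaks above add unlikely() to the error check and only unmap pages that were actually mapped (cb->length != 0). The core mapping pattern, sketched in isolation (my_map_rx_page() and its parameters are placeholders):

#include <linux/dma-mapping.h>

static int my_map_rx_page(struct device *dev, struct page *page,
			  unsigned int len, enum dma_data_direction dir,
			  dma_addr_t *dma)
{
	*dma = dma_map_page(dev, page, 0, len, dir);
	if (unlikely(dma_mapping_error(dev, *dma)))
		return -EIO;	/* caller must not post this buffer to HW */

	return 0;
}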
*/ smp_mb(); - if (netif_tx_queue_stopped(dev_queue)) { + if (netif_tx_queue_stopped(dev_queue) && + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { netif_tx_wake_queue(dev_queue); ring->stats.restart_queue++; } } - - return !!budget; } static int hns3_desc_unused(struct hns3_enet_ring *ring) @@ -2092,7 +2255,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { - netdev_err(netdev, "L3/L4 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; u64_stats_update_end(&ring->syncp); @@ -2121,6 +2283,8 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, l4_type == HNS3_L4_TYPE_SCTP)) skb->ip_summed = CHECKSUM_UNNECESSARY; break; + default: + break; } } @@ -2129,18 +2293,18 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) napi_gro_receive(&ring->tqp_vector->napi, skb); } -static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, - struct hns3_desc *desc, u32 l234info) +static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info, + u16 *vlan_tag) { struct pci_dev *pdev = ring->tqp->handle->pdev; - u16 vlan_tag; if (pdev->revision == 0x20) { - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - if (!(vlan_tag & VLAN_VID_MASK)) - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(*vlan_tag & VLAN_VID_MASK)) + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - return vlan_tag; + return (*vlan_tag != 0); } #define HNS3_STRP_OUTER_VLAN 0x1 @@ -2149,17 +2313,29 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - break; + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + return true; case HNS3_STRP_INNER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - break; + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + return true; default: - vlan_tag = 0; - break; + return false; } +} + +static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + struct hnae3_handle *handle = ring->tqp->handle; + enum pkt_hash_types rss_type; - return vlan_tag; + if (le32_to_cpu(desc->rx.rss_hash)) + rss_type = handle->kinfo.rss_type; + else + rss_type = PKT_HASH_TYPE_NONE; + + skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type); } static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, @@ -2261,16 +2437,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { u16 vlan_tag; - vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info); - if (vlan_tag & VLAN_VID_MASK) + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { - netdev_err(netdev, "no valid bd,%016llx,%016llx\n", - ((u64 *)desc)[0], ((u64 *)desc)[1]); u64_stats_update_begin(&ring->syncp); ring->stats.non_vld_descs++; u64_stats_update_end(&ring->syncp); @@ -2281,7 +2454,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (unlikely((!desc->rx.pkt_len) || hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { - netdev_err(netdev, "truncated pkt\n"); u64_stats_update_begin(&ring->syncp); 
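hns3_set_rx_skb_rss_type() above reports the hardware RSS hash to the stack. The generic pattern looks like this (my_set_rx_hash(), hw_hash and have_l4_hash are placeholders for values parsed out of the RX descriptor):

#include <linux/skbuff.h>

static void my_set_rx_hash(struct sk_buff *skb, u32 hw_hash, bool have_l4_hash)
{
	enum pkt_hash_types type;

	if (!hw_hash)
		type = PKT_HASH_TYPE_NONE;	/* nothing useful from HW */
	else
		type = have_l4_hash ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

	skb_set_hash(skb, hw_hash, type);
}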
ring->stats.err_pkt_len++; u64_stats_update_end(&ring->syncp); @@ -2291,7 +2463,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { - netdev_err(netdev, "L2 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l2_err++; u64_stats_update_end(&ring->syncp); @@ -2308,6 +2479,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ring->tqp_vector->rx_group.total_bytes += skb->len; hns3_rx_checksum(ring, skb, desc); + hns3_set_rx_skb_rss_type(ring, skb); + return 0; } @@ -2501,10 +2674,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - hns3_for_each_ring(ring, tqp_vector->tx_group) { - if (!hns3_clean_tx_ring(ring, budget)) - clean_complete = false; - } + hns3_for_each_ring(ring, tqp_vector->tx_group) + hns3_clean_tx_ring(ring); /* make sure rx ring budget not smaller than 1 */ rx_budget = max(budget / tqp_vector->num_tqps, 1); @@ -2627,6 +2798,23 @@ static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, group->count++; } +static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) +{ + struct pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_tqp_vector *tqp_vector; + int num_vectors = priv->vector_num; + int numa_node; + int vector_i; + + numa_node = dev_to_node(&pdev->dev); + + for (vector_i = 0; vector_i < num_vectors; vector_i++) { + tqp_vector = &priv->tqp_vector[vector_i]; + cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), + &tqp_vector->affinity_mask); + } +} + static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) { struct hnae3_ring_chain_node vector_ring_chain; @@ -2635,6 +2823,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) int ret = 0; u16 i; + hns3_nic_set_cpumask(priv); + for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; hns3_vector_gl_rl_init_hw(tqp_vector, priv); @@ -3069,38 +3259,48 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init) } -static void hns3_uninit_mac_addr(struct net_device *netdev) +static int hns3_restore_fd_rules(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = 0; - if (h->ae_algo->ops->rm_uc_addr) - h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr); + if (h->ae_algo->ops->restore_fd_rules) + ret = h->ae_algo->ops->restore_fd_rules(h); + + return ret; +} + +static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->del_all_fd_entries) + h->ae_algo->ops->del_all_fd_entries(h, clear_list); } static void hns3_nic_set_priv_ops(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + priv->ops.fill_desc = hns3_fill_desc; if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; + (netdev->features & NETIF_F_TSO6)) priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; + else priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } } static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; + u16 alloc_tqps, max_rss_size; struct hns3_nic_priv *priv; struct net_device *netdev; int ret; - netdev = 
alloc_etherdev_mq(sizeof(struct hns3_nic_priv), - hns3_get_max_available_channels(handle)); + handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, + &max_rss_size); + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); if (!netdev) return -ENOMEM; @@ -3189,9 +3389,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; + hns3_remove_hw_addr(netdev); + if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); + hns3_del_all_fd_rules(netdev, true); + hns3_force_clear_all_rx_ring(handle); ret = hns3_nic_uninit_vector_data(priv); @@ -3210,8 +3414,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) priv->ring_data = NULL; - hns3_uninit_mac_addr(netdev); - free_netdev(netdev); } @@ -3283,6 +3485,25 @@ static void hns3_recover_hw_addr(struct net_device *ndev) hns3_nic_mc_sync(ndev, ha->addr); } +static void hns3_remove_hw_addr(struct net_device *netdev) +{ + struct netdev_hw_addr_list *list; + struct netdev_hw_addr *ha, *tmp; + + hns3_nic_uc_unsync(netdev, netdev->dev_addr); + + /* go through and unsync uc_addr entries to the device */ + list = &netdev->uc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_uc_unsync(netdev, ha->addr); + + /* go through and unsync mc_addr entries to the device */ + list = &netdev->mc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + if (ha->refcount > 1) + hns3_nic_mc_unsync(netdev, ha->addr); +} + static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { @@ -3419,6 +3640,31 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) return 0; } +static void hns3_store_coal(struct hns3_nic_priv *priv) +{ + /* ethtool only support setting and querying one coal + * configuation for now, so save the vector 0' coal + * configuation here in order to restore it. + */ + memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, + sizeof(struct hns3_enet_coalesce)); +} + +static void hns3_restore_coal(struct hns3_nic_priv *priv) +{ + u16 vector_num = priv->vector_num; + int i; + + for (i = 0; i < vector_num; i++) { + memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, + sizeof(struct hns3_enet_coalesce)); + } +} + static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -3452,19 +3698,27 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); + bool vlan_filter_enable; int ret; hns3_init_mac_addr(netdev, false); - hns3_nic_set_rx_mode(netdev); hns3_recover_hw_addr(netdev); + hns3_update_promisc_mode(netdev, handle->netdev_flags); + vlan_filter_enable = netdev->flags & IFF_PROMISC ? 
false : true; + hns3_enable_vlan_filter(netdev, vlan_filter_enable); + /* Hardware table is only clear when pf resets */ if (!(handle->flags & HNAE3_SUPPORT_VF)) hns3_restore_vlan(netdev); + hns3_restore_fd_rules(netdev); + /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + hns3_restore_coal(priv); + ret = hns3_nic_init_vector_data(priv); if (ret) return ret; @@ -3480,6 +3734,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; @@ -3492,11 +3747,20 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) return ret; } + hns3_store_coal(priv); + ret = hns3_uninit_all_ring(priv); if (ret) netdev_err(netdev, "uninit ring error\n"); - hns3_uninit_mac_addr(netdev); + /* it is cumbersome for hardware to pick-and-choose entries for deletion + * from table space. Hence, for function reset software intervention is + * required to delete the entries + */ + if (hns3_dev_ongoing_func_reset(ae_dev)) { + hns3_remove_hw_addr(netdev); + hns3_del_all_fd_rules(netdev, false); + } return ret; } @@ -3526,24 +3790,7 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } -static void hns3_restore_coal(struct hns3_nic_priv *priv, - struct hns3_enet_coalesce *tx, - struct hns3_enet_coalesce *rx) -{ - u16 vector_num = priv->vector_num; - int i; - - for (i = 0; i < vector_num; i++) { - memcpy(&priv->tqp_vector[i].tx_group.coal, tx, - sizeof(struct hns3_enet_coalesce)); - memcpy(&priv->tqp_vector[i].rx_group.coal, rx, - sizeof(struct hns3_enet_coalesce)); - } -} - -static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, - struct hns3_enet_coalesce *tx, - struct hns3_enet_coalesce *rx) +static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); @@ -3561,7 +3808,7 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, if (ret) goto err_alloc_vector; - hns3_restore_coal(priv, tx, rx); + hns3_restore_coal(priv); ret = hns3_nic_init_vector_data(priv); if (ret) @@ -3593,7 +3840,6 @@ int hns3_set_channels(struct net_device *netdev, struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - struct hns3_enet_coalesce tx_coal, rx_coal; bool if_running = netif_running(netdev); u32 new_tqp_num = ch->combined_count; u16 org_tqp_num; @@ -3625,15 +3871,7 @@ int hns3_set_channels(struct net_device *netdev, goto open_netdev; } - /* Changing the tqp num may also change the vector num, - * ethtool only support setting and querying one coal - * configuation for now, so save the vector 0' coal - * configuation here in order to restore it. 
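The hns3_store_coal()/hns3_restore_coal() pair used above exists because ethtool exposes a single coalesce configuration, so vector 0's settings are saved and later copied to every vector. A reduced sketch of the replicate step (my_coal and my_vec are placeholder types, not the driver's structures):

#include <linux/types.h>

struct my_coal {
	u32 int_gl;
	u8 gl_adapt_enable;
};

struct my_vec {
	struct my_coal tx_coal;
	struct my_coal rx_coal;
};

static void my_restore_coal(struct my_vec *vecs, int num_vectors,
			    const struct my_coal *tx, const struct my_coal *rx)
{
	int i;

	for (i = 0; i < num_vectors; i++) {
		vecs[i].tx_coal = *tx;	/* same settings for every vector */
		vecs[i].rx_coal = *rx;
	}
}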
- */ - memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal, - sizeof(struct hns3_enet_coalesce)); - memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal, - sizeof(struct hns3_enet_coalesce)); + hns3_store_coal(priv); hns3_nic_dealloc_vector_data(priv); @@ -3641,10 +3879,9 @@ int hns3_set_channels(struct net_device *netdev, hns3_put_ring_config(priv); org_tqp_num = h->kinfo.num_tqps; - ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal); + ret = hns3_modify_tqp_num(netdev, new_tqp_num); if (ret) { - ret = hns3_modify_tqp_num(netdev, org_tqp_num, - &tx_coal, &rx_coal); + ret = hns3_modify_tqp_num(netdev, org_tqp_num); if (ret) { /* If revert to old tqp failed, fatal error occurred */ dev_err(&netdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index cb450d7ec8c1..71cfca132d0b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -419,8 +419,7 @@ struct hns3_nic_ring_data { struct hns3_nic_ops { int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type); + int size, int frag_end, enum hns_desc_type type); int (*maybe_stop_tx)(struct sk_buff **out_skb, int *bnum, struct hns3_enet_ring *ring); void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); @@ -491,7 +490,9 @@ struct hns3_enet_tqp_vector { struct hns3_enet_ring_group rx_group; struct hns3_enet_ring_group tx_group; + cpumask_t affinity_mask; u16 num_tqps; /* total number of tqps in TQP vector */ + struct irq_affinity_notify affinity_notify; char name[HNAE3_INT_NAME_LEN]; @@ -541,6 +542,8 @@ struct hns3_nic_priv { /* Vxlan/Geneve information */ struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct hns3_enet_coalesce tx_coal; + struct hns3_enet_coalesce rx_coal; }; union l3_hdr_info { @@ -581,6 +584,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) writel(value, reg_addr + reg); } +static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev) +{ + return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET)); +} + #define hns3_write_dev(a, reg, value) \ hns3_write_reg((a)->io_base, (reg), (value)) @@ -615,7 +623,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev); int hns3_set_channels(struct net_device *netdev, struct ethtool_channels *ch); -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +void hns3_clean_tx_ring(struct hns3_enet_ring *ring); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); @@ -631,6 +639,9 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, u32 rl_value); +void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); +void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags); + #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); #else diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index f70ee6910ee2..a4762c2b8ba1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -22,13 +22,13 @@ struct hns3_stats { static const struct hns3_stats hns3_txq_stats[] = { /* Tx per-queue statistics */ 
HNS3_TQP_STAT("io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("tx_dropped", sw_err_cnt), + HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", tx_pkts), HNS3_TQP_STAT("bytes", tx_bytes), HNS3_TQP_STAT("errors", tx_err_cnt), - HNS3_TQP_STAT("tx_wake", restart_queue), - HNS3_TQP_STAT("tx_busy", tx_busy), + HNS3_TQP_STAT("wake", restart_queue), + HNS3_TQP_STAT("busy", tx_busy), }; #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) @@ -36,7 +36,7 @@ static const struct hns3_stats hns3_txq_stats[] = { static const struct hns3_stats hns3_rxq_stats[] = { /* Rx per-queue statistics */ HNS3_TQP_STAT("io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("rx_dropped", sw_err_cnt), + HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", rx_pkts), HNS3_TQP_STAT("bytes", rx_bytes), @@ -53,7 +53,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) -#define HNS3_SELF_TEST_TYPE_NUM 2 +#define HNS3_SELF_TEST_TYPE_NUM 3 #define HNS3_NIC_LB_TEST_PKT_NUM 1 #define HNS3_NIC_LB_TEST_RING_ID 0 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128 @@ -71,6 +71,7 @@ struct hns3_link_mode_mapping { static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) { struct hnae3_handle *h = hns3_get_handle(ndev); + bool vlan_filter_enable; int ret; if (!h->ae_algo->ops->set_loopback || @@ -78,8 +79,9 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) return -EOPNOTSUPP; switch (loop) { - case HNAE3_MAC_INTER_LOOP_SERDES: - case HNAE3_MAC_INTER_LOOP_MAC: + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + case HNAE3_LOOP_APP: ret = h->ae_algo->ops->set_loopback(h, loop, en); break; default: @@ -90,7 +92,14 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) if (ret) return ret; - h->ae_algo->ops->set_promisc_mode(h, en, en); + if (en) { + h->ae_algo->ops->set_promisc_mode(h, true, true); + } else { + /* recover promisc mode before loopback test */ + hns3_update_promisc_mode(ndev, h->netdev_flags); + vlan_filter_enable = ndev->flags & IFF_PROMISC ? 
false : true; + hns3_enable_vlan_filter(ndev, vlan_filter_enable); + } return ret; } @@ -100,41 +109,26 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->start) - return -EOPNOTSUPP; - ret = hns3_nic_reset_all_ring(h); if (ret) return ret; - ret = h->ae_algo->ops->start(h); - if (ret) { - netdev_err(ndev, - "hns3_lb_up ae start return error: %d\n", ret); - return ret; - } - ret = hns3_lp_setup(ndev, loop_mode, true); usleep_range(10000, 20000); - return ret; + return 0; } static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) { - struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->stop) - return -EOPNOTSUPP; - ret = hns3_lp_setup(ndev, loop_mode, false); if (ret) { netdev_err(ndev, "lb_setup return error: %d\n", ret); return ret; } - h->ae_algo->ops->stop(h); usleep_range(10000, 20000); return 0; @@ -152,6 +146,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb) packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + ethh->h_dest[5] += 0x1f; eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); skb_reset_mac_header(skb); @@ -214,7 +209,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, for (i = start_ringid; i <= end_ringid; i++) { struct hns3_enet_ring *ring = priv->ring_data[i].ring; - hns3_clean_tx_ring(ring, budget); + hns3_clean_tx_ring(ring); } } @@ -300,16 +295,21 @@ static void hns3_self_test(struct net_device *ndev, if (eth_test->flags != ETH_TEST_FL_OFFLINE) return; - st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; - st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = - h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; + st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; + st_param[HNAE3_LOOP_APP][1] = + h->flags & HNAE3_SUPPORT_APP_LOOPBACK; - st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES; - st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] = - h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK; + st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES; + st_param[HNAE3_LOOP_SERIAL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + + st_param[HNAE3_LOOP_PARALLEL_SERDES][0] = + HNAE3_LOOP_PARALLEL_SERDES; + st_param[HNAE3_LOOP_PARALLEL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; if (if_running) - dev_close(ndev); + ndev->netdev_ops->ndo_stop(ndev); #if IS_ENABLED(CONFIG_VLAN_8021Q) /* Disable the vlan filter for selftest does not support it */ @@ -347,7 +347,7 @@ static void hns3_self_test(struct net_device *ndev, #endif if (if_running) - dev_open(ndev); + ndev->netdev_ops->ndo_open(ndev); } static int hns3_get_sset_count(struct net_device *netdev, int stringset) @@ -365,9 +365,10 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset) case ETH_SS_TEST: return ops->get_sset_count(h, stringset); - } - return 0; + default: + return -EOPNOTSUPP; + } } static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, @@ -383,7 +384,7 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, data[ETH_GSTRING_LEN - 1] = '\0'; /* first, prepend the prefix string */ - n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_", + n1 = snprintf(data, MAX_PREFIX_SIZE, "%s%d_", prefix, i); n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); size_left = (ETH_GSTRING_LEN - 1) - n1; @@ -431,6 +432,8 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 
*data) case ETH_SS_TEST: ops->get_strings(h, stringset, data); break; + default: + break; } } @@ -556,30 +559,72 @@ static int hns3_set_pauseparam(struct net_device *netdev, return -EOPNOTSUPP; } +static void hns3_get_ksettings(struct hnae3_handle *h, + struct ethtool_link_ksettings *cmd) +{ + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + /* 1.auto_neg & speed & duplex from cmd */ + if (ops->get_ksettings_an_result) + ops->get_ksettings_an_result(h, + &cmd->base.autoneg, + &cmd->base.speed, + &cmd->base.duplex); + + /* 2.get link mode*/ + if (ops->get_link_mode) + ops->get_link_mode(h, + cmd->link_modes.supported, + cmd->link_modes.advertising); + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (ops->get_mdix_mode) + ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); +} + static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); - u32 flowctrl_adv = 0; + const struct hnae3_ae_ops *ops; + u8 media_type; u8 link_stat; if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; - /* 1.auto_neg & speed & duplex from cmd */ - if (netdev->phydev) { + ops = h->ae_algo->ops; + if (ops->get_media_type) + ops->get_media_type(h, &media_type); + else + return -EOPNOTSUPP; + + switch (media_type) { + case HNAE3_MEDIA_TYPE_NONE: + cmd->base.port = PORT_NONE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_FIBER: + cmd->base.port = PORT_FIBRE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_COPPER: + if (!netdev->phydev) + return -EOPNOTSUPP; + + cmd->base.port = PORT_TP; phy_ethtool_ksettings_get(netdev->phydev, cmd); + break; + default: + + netdev_warn(netdev, "Unknown media type"); return 0; } - if (h->ae_algo->ops->get_ksettings_an_result) - h->ae_algo->ops->get_ksettings_an_result(h, - &cmd->base.autoneg, - &cmd->base.speed, - &cmd->base.duplex); - else - return -EOPNOTSUPP; + /* mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; link_stat = hns3_get_link(netdev); if (!link_stat) { @@ -587,36 +632,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - /* 2.get link mode and port type*/ - if (h->ae_algo->ops->get_link_mode) - h->ae_algo->ops->get_link_mode(h, - cmd->link_modes.supported, - cmd->link_modes.advertising); - - cmd->base.port = PORT_NONE; - if (h->ae_algo->ops->get_port_type) - h->ae_algo->ops->get_port_type(h, - &cmd->base.port); - - /* 3.mdix_ctrl&mdix get from phy reg */ - if (h->ae_algo->ops->get_mdix_mode) - h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, - &cmd->base.eth_tp_mdix); - /* 4.mdio_support */ - cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; - - /* 5.get flow control setttings */ - if (h->ae_algo->ops->get_flowctrl_adv) - h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv); - - if (flowctrl_adv & ADVERTISED_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Pause); - - if (flowctrl_adv & ADVERTISED_Asym_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Asym_Pause); - return 0; } @@ -671,12 +686,13 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir, if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) return -EOPNOTSUPP; - /* currently we only support Toeplitz hash */ - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { - netdev_err(netdev, - "hash func not supported (only Toeplitz hash)\n"); + if ((h->pdev->revision == 0x20 && + hfunc != ETH_RSS_HASH_TOP) || 
(hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) { + netdev_err(netdev, "hash func not supported\n"); return -EOPNOTSUPP; } + if (!indir) { netdev_err(netdev, "set rss failed for indir is empty\n"); @@ -692,20 +708,33 @@ static int hns3_get_rxnfc(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) + if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = h->kinfo.rss_size; - break; + cmd->data = h->kinfo.num_tqps; + return 0; case ETHTOOL_GRXFH: - return h->ae_algo->ops->get_rss_tuple(h, cmd); + if (h->ae_algo->ops->get_rss_tuple) + return h->ae_algo->ops->get_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLCNT: + if (h->ae_algo->ops->get_fd_rule_cnt) + return h->ae_algo->ops->get_fd_rule_cnt(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRULE: + if (h->ae_algo->ops->get_fd_rule_info) + return h->ae_algo->ops->get_fd_rule_info(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLALL: + if (h->ae_algo->ops->get_fd_all_rules) + return h->ae_algo->ops->get_fd_all_rules(h, cmd, + rule_locs); + return -EOPNOTSUPP; default: return -EOPNOTSUPP; } - - return 0; } static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, @@ -788,12 +817,22 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) + if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_SRXFH: - return h->ae_algo->ops->set_rss_tuple(h, cmd); + if (h->ae_algo->ops->set_rss_tuple) + return h->ae_algo->ops->set_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLINS: + if (h->ae_algo->ops->add_fd_entry) + return h->ae_algo->ops->add_fd_entry(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLDEL: + if (h->ae_algo->ops->del_fd_entry) + return h->ae_algo->ops->del_fd_entry(h, cmd); + return -EOPNOTSUPP; default: return -EOPNOTSUPP; } @@ -1047,6 +1086,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, .get_rxfh_key_size = hns3_get_rss_key_size, .get_rxfh_indir_size = hns3_get_rss_indir_size, .get_rxfh = hns3_get_rss, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index cb8ddd043476..580e81743681 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -6,6 +6,6 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 821d4c2f84bd..872cd4bdd70d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -175,21 +175,22 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, + HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004, 
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, - HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012, - - /* Multicast linear table commands */ - HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, - HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, - HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, - HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, /* VLAN commands */ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, + /* Flow Director commands */ + HCLGE_OPC_FD_MODE_CTRL = 0x1200, + HCLGE_OPC_FD_GET_ALLOCATION = 0x1201, + HCLGE_OPC_FD_KEY_CONFIG = 0x1202, + HCLGE_OPC_FD_TCAM_OP = 0x1203, + HCLGE_OPC_FD_AD_OP = 0x1204, + /* MDIO command */ HCLGE_OPC_MDIO_CONFIG = 0x1900, @@ -208,6 +209,28 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + + /* Error INT commands */ + HCLGE_TM_SCH_ECC_INT_EN = 0x0829, + HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d, + HCLGE_TM_SCH_ECC_ERR_RINT_CE = 0x082f, + HCLGE_TM_SCH_ECC_ERR_RINT_NFE = 0x0830, + HCLGE_TM_SCH_ECC_ERR_RINT_FE = 0x0831, + HCLGE_TM_SCH_MBIT_ECC_INFO_CMD = 0x0833, + HCLGE_COMMON_ECC_INT_CFG = 0x1505, + HCLGE_IGU_EGU_TNL_INT_QUERY = 0x1802, + HCLGE_IGU_EGU_TNL_INT_EN = 0x1803, + HCLGE_IGU_EGU_TNL_INT_CLR = 0x1804, + HCLGE_IGU_COMMON_INT_QUERY = 0x1805, + HCLGE_IGU_COMMON_INT_EN = 0x1806, + HCLGE_IGU_COMMON_INT_CLR = 0x1807, + HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14, + HCLGE_TM_QCN_MEM_INT_INFO_CMD = 0x1A17, + HCLGE_PPP_CMD0_INT_CMD = 0x2100, + HCLGE_PPP_CMD1_INT_CMD = 0x2101, + HCLGE_NCSI_INT_QUERY = 0x2400, + HCLGE_NCSI_INT_EN = 0x2401, + HCLGE_NCSI_INT_CLR = 0x2402, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -395,6 +418,8 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) #define HCLGE_CFG_SPEED_ABILITY_S 0 #define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0) +#define HCLGE_CFG_UMV_TBL_SPACE_S 16 +#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) struct hclge_cfg_param_cmd { __le32 offset; @@ -584,13 +609,12 @@ struct hclge_mac_vlan_tbl_entry_cmd { u8 rsv2[6]; }; -#define HCLGE_VLAN_MASK_EN_B 0 -struct hclge_mac_vlan_mask_entry_cmd { - u8 rsv0[2]; - u8 vlan_mask; - u8 rsv1; - u8 mac_mask[6]; - u8 rsv2[14]; +#define HCLGE_UMV_SPC_ALC_B 0 +struct hclge_umv_spc_alc_cmd { + u8 allocate; + u8 rsv1[3]; + __le32 space_size; + u8 rsv2[16]; }; #define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0) @@ -615,30 +639,6 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 rsv3[2]; }; -#define HCLGE_CFG_MTA_MAC_SEL_S 0 -#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) -#define HCLGE_CFG_MTA_MAC_EN_B 7 -struct hclge_mta_filter_mode_cmd { - u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ - u8 rsv[23]; -}; - -#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0 -struct hclge_cfg_func_mta_filter_cmd { - u8 accept; /* Only used lowest 1 bit */ - u8 function_id; - u8 rsv[22]; -}; - -#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0 -#define HCLGE_CFG_MTA_ITEM_IDX_S 0 -#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) -struct hclge_cfg_func_mta_item_cmd { - __le16 item_idx; /* Only used lowest 12 bit */ - u8 accept; /* Only used lowest 1 bit */ - u8 rsv[21]; -}; - struct hclge_mac_vlan_add_cmd { __le16 flags; __le16 mac_addr_hi16; @@ -778,6 +778,7 @@ struct hclge_reset_cmd { }; #define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) +#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) #define HCLGE_CMD_SERDES_DONE_B BIT(0) #define HCLGE_CMD_SERDES_SUCCESS_B BIT(1) struct hclge_serdes_lb_cmd { @@ -818,6 +819,76 @@ struct hclge_set_led_state_cmd { u8 rsv2[20]; }; +struct hclge_get_fd_mode_cmd { + u8 mode; + u8 enable; + u8 rsv[22]; +}; 
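
[Editorial note, not part of the patch] The new flow-director opcodes (HCLGE_OPC_FD_MODE_CTRL and friends) and the hclge_get_fd_mode_cmd layout added above are consumed through the same command-queue helpers this series already uses elsewhere (hclge_cmd_setup_basic_desc() / hclge_cmd_send()). The fragment below is only an illustrative sketch of that pattern under those assumptions: the helper name hclge_query_fd_mode_sketch() is hypothetical, and the cast of desc.data onto the request structure follows the driver's usual descriptor convention rather than anything shown in this hunk.

    /* Hypothetical sketch: query the flow-director mode/enable state
     * using the opcode and response layout introduced above.
     */
    static int hclge_query_fd_mode_sketch(struct hclge_dev *hdev,
                                          u8 *fd_mode, u8 *fd_enable)
    {
            struct hclge_get_fd_mode_cmd *req;
            struct hclge_desc desc;
            int ret;

            /* Build a read request for HCLGE_OPC_FD_MODE_CTRL (0x1200). */
            hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

            ret = hclge_cmd_send(&hdev->hw, &desc, 1);
            if (ret) {
                    dev_err(&hdev->pdev->dev,
                            "get fd mode failed, ret = %d\n", ret);
                    return ret;
            }

            /* Firmware answers in the descriptor data area, laid out as
             * struct hclge_get_fd_mode_cmd (mode + enable, rest reserved).
             */
            req = (struct hclge_get_fd_mode_cmd *)desc.data;
            *fd_mode = req->mode;
            *fd_enable = req->enable;

            return 0;
    }

The real PF-side users of these commands live in hclge_main.c; this sketch only shows how the request/response structures defined in hclge_cmd.h plug into the existing descriptor flow.
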
+ +struct hclge_get_fd_allocation_cmd { + __le32 stage1_entry_num; + __le32 stage2_entry_num; + __le16 stage1_counter_num; + __le16 stage2_counter_num; + u8 rsv[12]; +}; + +struct hclge_set_fd_key_config_cmd { + u8 stage; + u8 key_select; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u8 rsv1[2]; + __le32 tuple_mask; + __le32 meta_data_mask; + u8 rsv2[8]; +}; + +#define HCLGE_FD_EPORT_SW_EN_B 0 +struct hclge_fd_tcam_config_1_cmd { + u8 stage; + u8 xy_sel; + u8 port_info; + u8 rsv1[1]; + __le32 index; + u8 entry_vld; + u8 rsv2[7]; + u8 tcam_data[8]; +}; + +struct hclge_fd_tcam_config_2_cmd { + u8 tcam_data[24]; +}; + +struct hclge_fd_tcam_config_3_cmd { + u8 tcam_data[20]; + u8 rsv[4]; +}; + +#define HCLGE_FD_AD_DROP_B 0 +#define HCLGE_FD_AD_DIRECT_QID_B 1 +#define HCLGE_FD_AD_QID_S 2 +#define HCLGE_FD_AD_QID_M GENMASK(12, 2) +#define HCLGE_FD_AD_USE_COUNTER_B 12 +#define HCLGE_FD_AD_COUNTER_NUM_S 13 +#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) +#define HCLGE_FD_AD_NXT_STEP_B 20 +#define HCLGE_FD_AD_NXT_KEY_S 21 +#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21) +#define HCLGE_FD_AD_WR_RULE_ID_B 0 +#define HCLGE_FD_AD_RULE_ID_S 1 +#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1) + +struct hclge_fd_ad_config_cmd { + u8 stage; + u8 rsv1[3]; + __le32 index; + __le64 ad_data; + u8 rsv2[8]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index f08ebb7caaaf..e72f724123d7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -73,6 +73,7 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, u8 *tc, bool *changed) { + bool has_ets_tc = false; u32 total_ets_bw = 0; u8 max_tc = 0; u8 i; @@ -100,13 +101,14 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, *changed = true; total_ets_bw += ets->tc_tx_bw[i]; - break; + has_ets_tc = true; + break; default: return -EINVAL; } } - if (total_ets_bw != BW_PERCENT) + if (has_ets_tc && total_ets_bw != BW_PERCENT) return -EINVAL; *tc = max_tc + 1; @@ -182,7 +184,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) return ret; - hclge_tm_schd_info_update(hdev, num_tc); + ret = hclge_tm_schd_info_update(hdev, num_tc); + if (ret) + return ret; ret = hclge_ieee_ets_to_tm_info(hdev, ets); if (ret) @@ -308,7 +312,9 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) return -EINVAL; } - hclge_tm_schd_info_update(hdev, tc); + ret = hclge_tm_schd_info_update(hdev, tc); + if (ret) + return ret; ret = hclge_tm_prio_tc_info_update(hdev, prio_tc); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c new file mode 100644 index 000000000000..f7e363b90fe0 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -0,0 +1,1088 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2016-2017 Hisilicon Limited. 
*/ + +#include "hclge_err.h" + +static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { + { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" }, + { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = { + { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { + { .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" }, + { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = { + { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" }, + { .int_msk = 
BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { + { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" }, + { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" }, + { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" }, + { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" }, + { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" }, + { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" }, + { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_igu_com_err_int[] = { + { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" }, + { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" }, + { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = { + { .int_msk = BIT(0), .msg = "rx_buf_overflow" }, + { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" }, + { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" }, + { .int_msk = BIT(3), .msg = "tx_buf_overflow" }, + { .int_msk = BIT(4), .msg = "tx_buf_underrun" }, + { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ncsi_err_int[] = { + { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_int0[] = { + { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" }, + { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" }, + { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" }, + { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" }, + { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" }, + { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" }, + { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" }, + { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" }, + { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" }, + { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" }, + { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" }, + { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" }, + { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" }, + { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" }, + { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" }, + { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" }, + { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" }, + { .int_msk = BIT(25), .msg = 
"mcast_linear_table_mem_ecc_1bit_err" }, + { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" }, + { .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_1bit_err" }, + { .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_1bit_err" }, + { .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_1bit_err" }, + { .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_int1[] = { + { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" }, + { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, + { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" }, + { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, + { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" }, + { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" }, + { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" }, + { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" }, + { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" }, + { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" }, + { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" }, + { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" }, + { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" }, + { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" }, + { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" }, + { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" }, + { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" }, + { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" }, + { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" }, + { .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_mbit_err" }, + { .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_mbit_err" }, + { .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_mbit_err" }, + { .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_pf_int[] = { + { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" }, + { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_int2[] = { + { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" }, + { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" }, + { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_int3[] = { + { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" }, + { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" }, + 
{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" }, + { /* sentinel */ } +}; + +struct hclge_tm_sch_ecc_info { + const char *name; +}; + +static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = { + { + { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" }, + { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" }, + { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" }, + { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" }, + }, + { + { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" }, + { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" }, + { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" }, + { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" }, + { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" }, + }, + { + { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" }, + { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" }, + { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" }, + { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" }, + { .name = "NIC_QUEUE_CTRL:QCLEN TAB" }, + }, + { + { .name = "RAM_CFG_CTRL:CSHAP TAB" }, + { .name = "RAM_CFG_CTRL:PSHAP TAB" }, + }, + { + { .name = "SHAPER_CTRL:PSHAP TAB" }, + }, + { + { .name = "MSCH_CTRL" }, + }, + { + { .name = "TOP_CTRL" }, + }, +}; + +static const struct hclge_hw_error hclge_tm_sch_err_int[] = { + { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" }, + { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(12), + .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(13), + .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(14), + .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(15), + .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(16), + .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(17), + .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(18), + .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(19), + .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(20), + .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" }, + { .int_msk = BIT(21), + .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" }, + { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" }, + { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" }, + { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" }, + { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" }, 
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" }, + { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" }, + { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" }, + { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" }, + { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" }, + { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = { + { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" }, + { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" }, + { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" }, + { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" }, + { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" }, + { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" }, + { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" }, + { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" }, + { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" }, + { .int_msk = BIT(20), .msg = "qcn_gp3_barral_mem_ecc_1bit_err" }, + { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static void hclge_log_error(struct device *dev, + const struct hclge_hw_error *err_list, + u32 err_sts) +{ + const struct hclge_hw_error *err; + int i = 0; + + while (err_list[i].msg) { + err = &err_list[i]; + if (!(err->int_msk & err_sts)) { + i++; + continue; + } + dev_warn(dev, "%s [error status=0x%x] found\n", + err->msg, err_sts); + i++; + } +} + +/* hclge_cmd_query_error: read the error information + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @cmd: command opcode + * @flag: flag for extended command structure + * @w_num: offset for setting the read interrupt type. + * @int_type: select which type of the interrupt for which the error + * info will be read(RAS-CE/RAS-NFE/RAS-FE etc). 
+ * + * This function query the error info from hw register/s using command + */ +static int hclge_cmd_query_error(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 cmd, + u16 flag, u8 w_num, + enum hclge_err_int_type int_type) +{ + struct device *dev = &hdev->pdev->dev; + int num = 1; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], cmd, true); + if (flag) { + desc[0].flag |= cpu_to_le16(flag); + hclge_cmd_setup_basic_desc(&desc[1], cmd, true); + num = 2; + } + if (w_num) + desc[0].data[w_num] = cpu_to_le32(int_type); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "query error cmd failed (%d)\n", ret); + + return ret; +} + +/* hclge_cmd_clear_error: clear the error status + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @desc_src: prefilled descriptor from the previous command for reusing + * @cmd: command opcode + * @flag: flag for extended command structure + * + * This function clear the error status in the hw register/s using command + */ +static int hclge_cmd_clear_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + struct hclge_desc *desc_src, + u32 cmd, u16 flag) +{ + struct device *dev = &hdev->pdev->dev; + int num = 1; + int ret, i; + + if (cmd) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (flag) { + desc[0].flag |= cpu_to_le16(flag); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + num = 2; + } + if (desc_src) { + for (i = 0; i < 6; i++) { + desc[0].data[i] = desc_src[0].data[i]; + if (flag) + desc[1].data[i] = desc_src[1].data[i]; + } + } + } else { + hclge_cmd_reuse_desc(&desc[0], false); + if (flag) { + desc[0].flag |= cpu_to_le16(flag); + hclge_cmd_reuse_desc(&desc[1], false); + num = 2; + } + } + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear error cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_enable_common_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false); + + if (en) { + /* enable COMMON error interrupts */ + desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN); + desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN); + desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN); + desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN); + } else { + /* disable COMMON error interrupts */ + desc[0].data[0] = 0; + desc[0].data[2] = 0; + desc[0].data[3] = 0; + desc[0].data[4] = 0; + desc[0].data[5] = 0; + } + desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK); + desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK); + desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK); + desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "failed(%d) to enable/disable COMMON err interrupts\n", + ret); + + return ret; +} + +static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + if (hdev->pdev->revision < 0x21) + return 
0; + + /* enable/disable NCSI error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN); + else + desc.data[0] = 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "failed(%d) to enable/disable NCSI error interrupts\n", + ret); + + return ret; +} + +static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* enable/disable error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN); + else + desc.data[0] = 0; + desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "failed(%d) to enable/disable IGU common interrupts\n", + ret); + return ret; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN); + else + desc.data[0] = 0; + desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "failed(%d) to enable/disable IGU-EGU TNL interrupts\n", + ret); + return ret; + } + + ret = hclge_enable_ncsi_error(hdev, en); + if (ret) + dev_err(dev, "fail(%d) to en/disable err int\n", ret); + + return ret; +} + +static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, + bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* enable/disable PPP error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + + if (cmd == HCLGE_PPP_CMD0_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN); + } else { + desc[0].data[0] = 0; + desc[0].data[1] = 0; + } + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK); + } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN); + } else { + desc[0].data[0] = 0; + desc[0].data[1] = 0; + } + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK); + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "failed(%d) to enable/disable PPP error interrupts\n", + ret); + + return ret; +} + +static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + int ret; + + ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD, + en); + if (ret) { + dev_err(dev, + "failed(%d) to enable/disable PPP error intr 0,1\n", + ret); + return ret; + } + + ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD, + en); + if (ret) + dev_err(dev, + "failed(%d) to enable/disable PPP error intr 2,3\n", + ret); + + return ret; +} + +int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* enable TM SCH hw errors */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false); + if (en) + desc.data[0] = 
cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN); + else + desc.data[0] = 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret); + return ret; + } + + /* enable TM QCN hw errors */ + ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, + 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret); + return ret; + } + + hclge_cmd_reuse_desc(&desc, false); + if (en) + desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN); + else + desc.data[1] = 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "failed(%d) to configure TM QCN mem errors\n", ret); + + return ret; +} + +static void hclge_process_common_error(struct hclge_dev *hdev, + enum hclge_err_int_type type) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + u32 err_sts; + int ret; + + /* read err sts */ + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_COMMON_ECC_INT_CFG, + HCLGE_CMD_FLAG_NEXT, 0, 0); + if (ret) { + dev_err(dev, + "failed(=%d) to query COMMON error interrupt status\n", + ret); + return; + } + + /* log err */ + err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK; + hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts); + + err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK; + hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts); + + err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT) + & HCLGE_CMDQ_ECC_INT_MASK; + hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts); + + if ((le32_to_cpu(desc[0].data[3])) & BIT(0)) + dev_warn(dev, "imp_rd_data_poison_err found\n"); + + err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) & + HCLGE_TQP_ECC_INT_MASK; + hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts); + + err_sts = (le32_to_cpu(desc[0].data[5])) & + HCLGE_IMP_ITCM4_ECC_INT_MASK; + hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts); + + /* clear error interrupts */ + desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK | + HCLGE_CMDQ_ROCEE_ECC_CLR_MASK); + desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK); + desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK); + + ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0, + HCLGE_CMD_FLAG_NEXT); + if (ret) + dev_err(dev, + "failed(%d) to clear COMMON error interrupt status\n", + ret); +} + +static void hclge_process_ncsi_error(struct hclge_dev *hdev, + enum hclge_err_int_type type) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc_rd; + struct hclge_desc desc_wr; + u32 err_sts; + int ret; + + if (hdev->pdev->revision < 0x21) + return; + + /* read NCSI error status */ + ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY, + 0, 1, HCLGE_NCSI_ERR_INT_TYPE); + if (ret) { + dev_err(dev, + "failed(=%d) to query NCSI error interrupt status\n", + ret); + return; + } + + /* log err */ + err_sts = le32_to_cpu(desc_rd.data[0]); + hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts); + + /* clear err int */ + ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, + HCLGE_NCSI_INT_CLR, 0); + if (ret) + dev_err(dev, "failed(=%d) to clear NCSI intrerrupt status\n", + ret); +} + +static void hclge_process_igu_egu_error(struct hclge_dev *hdev, + enum hclge_err_int_type int_type) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc_rd; + struct hclge_desc desc_wr; + u32 err_sts; + int ret; + + /* read IGU 
common err sts */ + ret = hclge_cmd_query_error(hdev, &desc_rd, + HCLGE_IGU_COMMON_INT_QUERY, + 0, 1, int_type); + if (ret) { + dev_err(dev, "failed(=%d) to query IGU common int status\n", + ret); + return; + } + + /* log err */ + err_sts = le32_to_cpu(desc_rd.data[0]) & + HCLGE_IGU_COM_INT_MASK; + hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts); + + /* clear err int */ + ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, + HCLGE_IGU_COMMON_INT_CLR, 0); + if (ret) { + dev_err(dev, "failed(=%d) to clear IGU common int status\n", + ret); + return; + } + + /* read IGU-EGU TNL err sts */ + ret = hclge_cmd_query_error(hdev, &desc_rd, + HCLGE_IGU_EGU_TNL_INT_QUERY, + 0, 1, int_type); + if (ret) { + dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n", + ret); + return; + } + + /* log err */ + err_sts = le32_to_cpu(desc_rd.data[0]) & + HCLGE_IGU_EGU_TNL_INT_MASK; + hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts); + + /* clear err int */ + ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, + HCLGE_IGU_EGU_TNL_INT_CLR, 0); + if (ret) { + dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n", + ret); + return; + } + + hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE); +} + +static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd, + enum hclge_err_int_type int_type) +{ + enum hnae3_reset_type reset_level = HNAE3_NONE_RESET; + struct device *dev = &hdev->pdev->dev; + const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3; + struct hclge_desc desc[2]; + u32 err_sts; + int ret; + + /* read PPP INT sts */ + ret = hclge_cmd_query_error(hdev, &desc[0], cmd, + HCLGE_CMD_FLAG_NEXT, 5, int_type); + if (ret) { + dev_err(dev, "failed(=%d) to query PPP interrupt status\n", + ret); + return -EIO; + } + + /* log error */ + if (cmd == HCLGE_PPP_CMD0_INT_CMD) { + hw_err_lst1 = &hclge_ppp_mpf_int0[0]; + hw_err_lst2 = &hclge_ppp_mpf_int1[0]; + hw_err_lst3 = &hclge_ppp_pf_int[0]; + } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { + hw_err_lst1 = &hclge_ppp_mpf_int2[0]; + hw_err_lst2 = &hclge_ppp_mpf_int3[0]; + } else { + dev_err(dev, "invalid command(=%d)\n", cmd); + return -EINVAL; + } + + err_sts = le32_to_cpu(desc[0].data[2]); + if (err_sts) { + hclge_log_error(dev, hw_err_lst1, err_sts); + reset_level = HNAE3_FUNC_RESET; + } + + err_sts = le32_to_cpu(desc[0].data[3]); + if (err_sts) { + hclge_log_error(dev, hw_err_lst2, err_sts); + reset_level = HNAE3_FUNC_RESET; + } + + err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3; + if (err_sts) { + hclge_log_error(dev, hw_err_lst3, err_sts); + reset_level = HNAE3_FUNC_RESET; + } + + /* clear PPP INT */ + ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0, + HCLGE_CMD_FLAG_NEXT); + if (ret) { + dev_err(dev, "failed(=%d) to clear PPP interrupt status\n", + ret); + return -EIO; + } + + return 0; +} + +static void hclge_process_ppp_error(struct hclge_dev *hdev, + enum hclge_err_int_type int_type) +{ + struct device *dev = &hdev->pdev->dev; + int ret; + + /* read PPP INT0,1 sts */ + ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD, + int_type); + if (ret < 0) { + dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n", + ret); + return; + } + + /* read err PPP INT2,3 sts */ + ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD, + int_type); + if (ret < 0) + dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n", + ret); +} + +static void hclge_process_tm_sch_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + const struct 
hclge_tm_sch_ecc_info *tm_sch_ecc_info; + struct hclge_desc desc; + u32 ecc_info; + u8 module_no; + u8 ram_no; + int ret; + + /* read TM scheduler errors */ + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n", ret); + return; + } + ecc_info = le32_to_cpu(desc.data[0]); + + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret); + return; + } + + /* log TM scheduler errors */ + if (le32_to_cpu(desc.data[0])) { + hclge_log_error(dev, &hclge_tm_sch_err_int[0], + le32_to_cpu(desc.data[0])); + if (le32_to_cpu(desc.data[0]) & 0x2) { + module_no = (ecc_info >> 20) & 0xF; + ram_no = (ecc_info >> 16) & 0xF; + tm_sch_ecc_info = + &hclge_tm_sch_ecc_err[module_no][ram_no]; + dev_warn(dev, "ecc err module:ram=%s\n", + tm_sch_ecc_info->name); + dev_warn(dev, "ecc memory address = 0x%x\n", + ecc_info & 0xFFFF); + } + } + + /* clear TM scheduler errors */ + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret); + return; + } + + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH CE status\n", ret); + return; + } + + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret); + return; + } + + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH NFE status\n", ret); + return; + } + + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to clear TM SCH NFE status\n", ret); + return; + } + + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read SCH FE status\n", ret); + return; + } + + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) + dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret); +} + +static void hclge_process_tm_qcn_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* read QCN errors */ + ret = hclge_cmd_query_error(hdev, &desc, + HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret); + return; + } + + /* log QCN errors */ + if (le32_to_cpu(desc.data[0])) + hclge_log_error(dev, &hclge_qcn_ecc_err_int[0], + le32_to_cpu(desc.data[0])); + + /* clear QCN errors */ + ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + if (ret) + dev_err(dev, "failed(%d) to clear QCN error status\n", ret); +} + +static void hclge_process_tm_error(struct hclge_dev *hdev, + enum hclge_err_int_type type) +{ + hclge_process_tm_sch_error(hdev); + hclge_process_tm_qcn_error(hdev); +} + +static const struct hclge_hw_blk hw_blk[] = { + { .msk = BIT(0), .name = "IGU_EGU", + .enable_error = hclge_enable_igu_egu_error, + .process_error = hclge_process_igu_egu_error, }, + { .msk = BIT(5), .name = "COMMON", + .enable_error = hclge_enable_common_error, + .process_error = hclge_process_common_error, }, + { .msk = BIT(4), .name = "TM", + .enable_error = hclge_enable_tm_hw_error, + .process_error = hclge_process_tm_error, }, + { .msk = BIT(1), .name = "PPP", + .enable_error = hclge_enable_ppp_error, + .process_error = hclge_process_ppp_error, }, + { 
/* sentinel */ } +}; + +int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state) +{ + struct device *dev = &hdev->pdev->dev; + int ret = 0; + int i = 0; + + while (hw_blk[i].name) { + if (!hw_blk[i].enable_error) { + i++; + continue; + } + ret = hw_blk[i].enable_error(hdev, state); + if (ret) { + dev_err(dev, "fail(%d) to en/disable err int\n", ret); + return ret; + } + i++; + } + + return ret; +} + +pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + u32 sts, val; + int i = 0; + + sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG); + + /* Processing Non-fatal errors */ + if (sts & HCLGE_RAS_REG_NFE_MASK) { + val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF; + i = 0; + while (hw_blk[i].name) { + if (!(hw_blk[i].msk & val)) { + i++; + continue; + } + dev_warn(dev, "%s ras non-fatal error identified\n", + hw_blk[i].name); + if (hw_blk[i].process_error) + hw_blk[i].process_error(hdev, + HCLGE_ERR_INT_RAS_NFE); + i++; + } + } + + return PCI_ERS_RESULT_NEED_RESET; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h new file mode 100644 index 000000000000..e0e3b5861495 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HCLGE_ERR_H +#define __HCLGE_ERR_H + +#include "hclge_main.h" + +#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00 +#define HCLGE_RAS_REG_FE_MASK 0xFF +#define HCLGE_RAS_REG_NFE_MASK 0xFF00 +#define HCLGE_RAS_REG_NFE_SHIFT 8 + +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300 +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100 +#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF +#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF +#define HCLGE_IGU_ERR_INT_EN 0x0000066F +#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F +#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF +#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_PF_ERR_INT_EN 0x0003 +#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003 +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F +#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3 +#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF +#define HCLGE_NCSI_ERR_INT_EN 0x3 +#define HCLGE_NCSI_ERR_INT_TYPE 0x9 + +#define HCLGE_IMP_TCM_ECC_INT_MASK 0xFFFF +#define HCLGE_IMP_ITCM4_ECC_INT_MASK 0x3 +#define HCLGE_CMDQ_ECC_INT_MASK 0xFFFF +#define HCLGE_CMDQ_ROC_ECC_INT_SHIFT 16 +#define HCLGE_TQP_ECC_INT_MASK 0xFFF +#define HCLGE_TQP_ECC_INT_SHIFT 16 +#define HCLGE_IMP_TCM_ECC_CLR_MASK 0xFFFF +#define HCLGE_IMP_ITCM4_ECC_CLR_MASK 0x3 +#define HCLGE_CMDQ_NIC_ECC_CLR_MASK 0xFFFF +#define HCLGE_CMDQ_ROCEE_ECC_CLR_MASK 0xFFFF0000 +#define 
HCLGE_TQP_IMP_ERR_CLR_MASK 0x0FFF0001 +#define HCLGE_IGU_COM_INT_MASK 0xF +#define HCLGE_IGU_EGU_TNL_INT_MASK 0x3F +#define HCLGE_PPP_PF_INT_MASK 0x100 + +enum hclge_err_int_type { + HCLGE_ERR_INT_MSIX = 0, + HCLGE_ERR_INT_RAS_CE = 1, + HCLGE_ERR_INT_RAS_NFE = 2, + HCLGE_ERR_INT_RAS_FE = 3, +}; + +struct hclge_hw_blk { + u32 msk; + const char *name; + int (*enable_error)(struct hclge_dev *hdev, bool en); + void (*process_error)(struct hclge_dev *hdev, + enum hclge_err_int_type type); +}; + +struct hclge_hw_error { + u32 int_msk; + const char *msg; +}; + +int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state); +int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en); +pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8577dfc799ad..5234b5373ed3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -19,20 +19,18 @@ #include "hclge_mbx.h" #include "hclge_mdio.h" #include "hclge_tm.h" +#include "hclge_err.h" #include "hnae3.h" #define HCLGE_NAME "hclge" #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) -#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) -#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable); static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size, bool is_alloc); static struct hnae3_ae_algo ae_algo; @@ -51,175 +49,12 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { - "Mac Loopback test", - "Serdes Loopback test", + "App Loopback test", + "Serdes serial Loopback test", + "Serdes parallel Loopback test", "Phy Loopback test" }; -static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { - {"igu_rx_oversize_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, - {"igu_rx_undersize_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, - {"igu_rx_out_all_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, - {"igu_rx_uni_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, - {"igu_rx_multi_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, - {"igu_rx_broad_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, - {"egu_tx_out_all_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, - {"egu_tx_uni_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, - {"egu_tx_multi_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, - {"egu_tx_broad_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, - {"ssu_ppp_mac_key_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, - {"ssu_ppp_host_key_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, - {"ppp_ssu_mac_rlt_num", - HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, - {"ppp_ssu_host_rlt_num", - HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, - {"ssu_tx_in_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, - 
{"ssu_tx_out_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, - {"ssu_rx_in_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, - {"ssu_rx_out_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} -}; - -static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { - {"igu_rx_err_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, - {"igu_rx_no_eof_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, - {"igu_rx_no_sof_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, - {"egu_tx_1588_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, - {"ssu_full_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, - {"ssu_part_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, - {"ppp_key_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, - {"ppp_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, - {"ssu_key_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, - {"pkt_curr_buf_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, - {"qcn_fb_rcv_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, - {"qcn_fb_drop_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, - {"qcn_fb_invaild_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, - {"rx_packet_tc0_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, - {"rx_packet_tc1_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, - {"rx_packet_tc2_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, - {"rx_packet_tc3_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, - {"rx_packet_tc4_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, - {"rx_packet_tc5_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, - {"rx_packet_tc6_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, - {"rx_packet_tc7_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, - {"rx_packet_tc0_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, - {"rx_packet_tc1_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, - {"rx_packet_tc2_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, - {"rx_packet_tc3_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, - {"rx_packet_tc4_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, - {"rx_packet_tc5_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, - {"rx_packet_tc6_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, - {"rx_packet_tc7_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, - {"tx_packet_tc0_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, - {"tx_packet_tc1_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, - {"tx_packet_tc2_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, - {"tx_packet_tc3_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, - {"tx_packet_tc4_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, - {"tx_packet_tc5_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, - {"tx_packet_tc6_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, - {"tx_packet_tc7_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, - {"tx_packet_tc0_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, - {"tx_packet_tc1_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, - {"tx_packet_tc2_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, - {"tx_packet_tc3_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, - {"tx_packet_tc4_out_cnt", - 
HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, - {"tx_packet_tc5_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, - {"tx_packet_tc6_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, - {"tx_packet_tc7_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, - {"pkt_curr_buf_tc0_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, - {"pkt_curr_buf_tc1_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, - {"pkt_curr_buf_tc2_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, - {"pkt_curr_buf_tc3_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, - {"pkt_curr_buf_tc4_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, - {"pkt_curr_buf_tc5_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, - {"pkt_curr_buf_tc6_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, - {"pkt_curr_buf_tc7_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, - {"mb_uncopy_num", - HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, - {"lo_pri_unicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, - {"hi_pri_multicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, - {"lo_pri_multicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, - {"rx_oq_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, - {"tx_oq_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, - {"nic_l2_err_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, - {"roc_l2_err_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} -}; - static const struct hclge_comm_stats_str g_mac_stats_string[] = { {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, @@ -394,109 +229,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { }, }; -static int hclge_64_bit_update_stats(struct hclge_dev *hdev) -{ -#define HCLGE_64_BIT_CMD_NUM 5 -#define HCLGE_64_BIT_RTN_DATANUM 4 - u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); - struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; - __le64 *desc_data; - int i, k, n; - int ret; - - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get 64 bit pkt stats fail, status = %d.\n", ret); - return ret; - } - - for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { - if (unlikely(i == 0)) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_64_BIT_RTN_DATANUM - 1; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_64_BIT_RTN_DATANUM; - } - for (k = 0; k < n; k++) { - *data++ += le64_to_cpu(*desc_data); - desc_data++; - } - } - - return 0; -} - -static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) -{ - stats->pkt_curr_buf_cnt = 0; - stats->pkt_curr_buf_tc0_cnt = 0; - stats->pkt_curr_buf_tc1_cnt = 0; - stats->pkt_curr_buf_tc2_cnt = 0; - stats->pkt_curr_buf_tc3_cnt = 0; - stats->pkt_curr_buf_tc4_cnt = 0; - stats->pkt_curr_buf_tc5_cnt = 0; - stats->pkt_curr_buf_tc6_cnt = 0; - stats->pkt_curr_buf_tc7_cnt = 0; -} - -static int hclge_32_bit_update_stats(struct hclge_dev *hdev) -{ -#define HCLGE_32_BIT_CMD_NUM 8 -#define HCLGE_32_BIT_RTN_DATANUM 8 - - struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; - struct hclge_32_bit_stats *all_32_bit_stats; - __le32 *desc_data; - int i, k, n; - u64 *data; - int ret; - - all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; - data = (u64 
*)(&all_32_bit_stats->egu_tx_1588_pkt); - - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get 32 bit pkt stats fail, status = %d.\n", ret); - - return ret; - } - - hclge_reset_partial_32bit_counter(all_32_bit_stats); - for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { - if (unlikely(i == 0)) { - __le16 *desc_data_16bit; - - all_32_bit_stats->igu_rx_err_pkt += - le32_to_cpu(desc[i].data[0]); - - desc_data_16bit = (__le16 *)&desc[i].data[1]; - all_32_bit_stats->igu_rx_no_eof_pkt += - le16_to_cpu(*desc_data_16bit); - - desc_data_16bit++; - all_32_bit_stats->igu_rx_no_sof_pkt += - le16_to_cpu(*desc_data_16bit); - - desc_data = &desc[i].data[2]; - n = HCLGE_32_BIT_RTN_DATANUM - 4; - } else { - desc_data = (__le32 *)&desc[i]; - n = HCLGE_32_BIT_RTN_DATANUM; - } - for (k = 0; k < n; k++) { - *data++ += le32_to_cpu(*desc_data); - desc_data++; - } - } - - return 0; -} - static int hclge_mac_update_stats(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -623,7 +355,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -631,7 +363,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -675,14 +407,8 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, struct net_device_stats *net_stats) { net_stats->tx_dropped = 0; - net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; - net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; - net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; - net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; - net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; @@ -717,12 +443,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev) dev_err(&hdev->pdev->dev, "Update MAC stats fail, status = %d.\n", status); - status = hclge_32_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 32 bit stats fail, status = %d.\n", - status); - hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); } @@ -743,18 +463,6 @@ static void hclge_update_stats(struct hnae3_handle *handle, "Update MAC stats fail, status = %d.\n", status); - status = hclge_32_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 32 bit stats fail, status = %d.\n", - status); - - status = hclge_64_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 64 bit stats fail, status = %d.\n", - status); - status = hclge_tqps_update_stats(handle); if (status) dev_err(&hdev->pdev->dev, @@ -768,7 +476,10 @@ static void hclge_update_stats(struct hnae3_handle 
*handle, static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) { -#define HCLGE_LOOPBACK_TEST_FLAGS 0x7 +#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ + HNAE3_SUPPORT_PHY_LOOPBACK |\ + HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ + HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -782,19 +493,19 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) if (stringset == ETH_SS_TEST) { /* clear loopback bit flags at first */ handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); - if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || + if (hdev->pdev->revision >= 0x21 || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { count += 1; - handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; + handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; } - count++; - handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK; + count += 2; + handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; } else if (stringset == ETH_SS_STATS) { count = ARRAY_SIZE(g_mac_stats_string) + - ARRAY_SIZE(g_all_32bit_stats_string) + - ARRAY_SIZE(g_all_64bit_stats_string) + hclge_tqps_get_sset_count(handle, stringset); } @@ -814,33 +525,29 @@ static void hclge_get_strings(struct hnae3_handle *handle, g_mac_stats_string, size, p); - size = ARRAY_SIZE(g_all_32bit_stats_string); - p = hclge_comm_get_strings(stringset, - g_all_32bit_stats_string, - size, - p); - size = ARRAY_SIZE(g_all_64bit_stats_string); - p = hclge_comm_get_strings(stringset, - g_all_64bit_stats_string, - size, - p); p = hclge_tqps_get_strings(handle, p); } else if (stringset == ETH_SS_TEST) { - if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { + if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_LOOP_APP], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], + hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { + if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], + hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], + hns3_nic_test_strs[HNAE3_LOOP_PHY], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } @@ -857,14 +564,6 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) g_mac_stats_string, ARRAY_SIZE(g_mac_stats_string), data); - p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, - g_all_32bit_stats_string, - ARRAY_SIZE(g_all_32bit_stats_string), - p); - p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, - g_all_64bit_stats_string, - ARRAY_SIZE(g_all_64bit_stats_string), - p); p = hclge_tqps_get_stats(handle, p); } @@ -1079,6 +778,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_SPEED_ABILITY_M, HCLGE_CFG_SPEED_ABILITY_S); + cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_UMV_TBL_SPACE_M, + HCLGE_CFG_UMV_TBL_SPACE_S); + if (!cfg->umv_space) + cfg->umv_space = 
HCLGE_DEFAULT_UMV_SPACE_PER_PF; } /* hclge_get_cfg: query the static parameter from flash @@ -1157,6 +861,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; + hdev->wanted_umv_size = cfg.umv_space; ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); if (ret) { @@ -1657,11 +1362,13 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, static int hclge_rx_buffer_calc(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { - u32 rx_all = hdev->pkt_buf_size; +#define HCLGE_BUF_SIZE_UNIT 128 + u32 rx_all = hdev->pkt_buf_size, aligned_mps; int no_pfc_priv_num, pfc_priv_num; struct hclge_priv_buf *priv; int i; + aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); rx_all -= hclge_get_tx_buff_alloced(buf_alloc); /* When DCB is not supported, rx private @@ -1680,13 +1387,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, if (hdev->hw_tc_map & BIT(i)) { priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = hdev->mps; - priv->wl.high = priv->wl.low + hdev->mps; + priv->wl.low = aligned_mps; + priv->wl.high = priv->wl.low + aligned_mps; priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; } else { priv->wl.low = 0; - priv->wl.high = 2 * hdev->mps; + priv->wl.high = 2 * aligned_mps; priv->buf_size = priv->wl.high; } } else { @@ -1718,11 +1425,11 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, if (hdev->tm_info.hw_pfc_map & BIT(i)) { priv->wl.low = 128; - priv->wl.high = priv->wl.low + hdev->mps; + priv->wl.high = priv->wl.low + aligned_mps; priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; } else { priv->wl.low = 0; - priv->wl.high = hdev->mps; + priv->wl.high = aligned_mps; priv->buf_size = priv->wl.high; } } @@ -2066,19 +1773,17 @@ static int hclge_init_msi(struct hclge_dev *hdev) return 0; } -static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) +static u8 hclge_check_speed_dup(u8 duplex, int speed) { - struct hclge_mac *mac = &hdev->hw.mac; - if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) - mac->duplex = (u8)duplex; - else - mac->duplex = HCLGE_MAC_FULL; + if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) + duplex = HCLGE_MAC_FULL; - mac->speed = speed; + return duplex; } -int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, + u8 duplex) { struct hclge_config_mac_speed_dup_cmd *req; struct hclge_desc desc; @@ -2138,7 +1843,23 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) return ret; } - hclge_check_speed_dup(hdev, duplex, speed); + return 0; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +{ + int ret; + + duplex = hclge_check_speed_dup(duplex, speed); + if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) + return 0; + + ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); + if (ret) + return ret; + + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; return 0; } @@ -2224,42 +1945,17 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) return hdev->hw.mac.autoneg; } -static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, - bool mask_vlan, - u8 *mac_mask) -{ - struct hclge_mac_vlan_mask_entry_cmd *req; - struct hclge_desc desc; - int status; - - req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); 
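The hclge_rx_buffer_calc() change above derives the rx private-buffer watermarks from an MPS value rounded up to HCLGE_BUF_SIZE_UNIT (128 bytes). A minimal userspace sketch of that arithmetic follows; the 1500-byte MPS and the DEFAULT_DV value are made-up inputs for illustration, and ROUND_UP simply mirrors what the kernel's round_up() does here:

#include <stdio.h>

/* illustrative stand-in for the kernel's round_up() */
#define ROUND_UP(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define BUF_SIZE_UNIT    128
#define DEFAULT_DV       0x8000   /* placeholder; the real HCLGE_DEFAULT_DV differs */

int main(void)
{
        unsigned int mps = 1500;                                  /* assumed max packet size */
        unsigned int aligned_mps = ROUND_UP(mps, BUF_SIZE_UNIT);  /* 1536 */

        /* TC with PFC enabled: low = aligned_mps, high = low + aligned_mps */
        unsigned int wl_low = aligned_mps;
        unsigned int wl_high = wl_low + aligned_mps;
        unsigned int buf_size = wl_high + DEFAULT_DV;

        printf("aligned_mps=%u low=%u high=%u buf=%u\n",
               aligned_mps, wl_low, wl_high, buf_size);
        return 0;
}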
- - hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, - mask_vlan ? 1 : 0); - ether_addr_copy(req->mac_mask, mac_mask); - - status = hclge_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "Config mac_vlan_mask failed for cmd_send, ret =%d\n", - status); - - return status; -} - static int hclge_mac_init(struct hclge_dev *hdev) { struct hnae3_handle *handle = &hdev->vport[0].nic; struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; - u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - struct hclge_vport *vport; int mtu; int ret; - int i; - ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, + hdev->hw.mac.duplex); if (ret) { dev_err(&hdev->pdev->dev, "Config mac speed dup fail ret=%d\n", ret); @@ -2268,39 +1964,6 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; - /* Initialize the MTA table work mode */ - hdev->enable_mta = true; - hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; - - ret = hclge_set_mta_filter_mode(hdev, - hdev->mta_mac_sel_type, - hdev->enable_mta); - if (ret) { - dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", - ret); - return ret; - } - - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - vport->accept_mta_mc = false; - - memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); - ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); - if (ret) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", ret); - return ret; - } - } - - ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); - if (ret) { - dev_err(&hdev->pdev->dev, - "set default mac_vlan_mask fail ret=%d\n", ret); - return ret; - } - if (netdev) mtu = netdev->mtu; else @@ -2360,10 +2023,13 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev) int mac_state; int link_stat; + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) + return 0; + mac_state = hclge_get_mac_link_status(hdev); if (hdev->hw.mac.phydev) { - if (!genphy_read_status(hdev->hw.mac.phydev)) + if (hdev->hw.mac.phydev->state == PHY_RUNNING) link_stat = mac_state & hdev->hw.mac.phydev->link; else @@ -2415,13 +2081,11 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev) return ret; } - if ((mac.speed != speed) || (mac.duplex != duplex)) { - ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac speed/duplex config failed %d\n", ret); - return ret; - } + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config failed %d\n", ret); + return ret; } return 0; @@ -2520,6 +2184,8 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, case HCLGE_VECTOR0_EVENT_MBX: hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); break; + default: + break; } } @@ -2793,8 +2459,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) static void hclge_reset(struct hclge_dev *hdev) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hnae3_handle *handle; + /* Initialize ae_dev reset status as well, in case enet layer wants to + * know if device is undergoing reset + */ + ae_dev->reset_type = hdev->reset_type; /* perform reset of the stack & ae device for a client */ handle = &hdev->vport[0].nic; rtnl_lock(); @@ -2815,14 +2486,21 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_notify_client(hdev, 
HNAE3_UP_CLIENT); handle->last_reset_time = jiffies; rtnl_unlock(); + ae_dev->reset_type = HNAE3_NONE_RESET; } -static void hclge_reset_event(struct hnae3_handle *handle) +static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclge_dev *hdev = ae_dev->priv; - /* check if this is a new reset request and we are not here just because + /* We might end up getting called broadly because of 2 below cases: + * 1. Recoverable error was conveyed through APEI and only way to bring + * normalcy is to reset. + * 2. A new reset request from the stack due to timeout + * + * For the first case,error event might not have ae handle available. + * check if this is a new reset request and we are not here just because * last reset attempt did not succeed and watchdog hit us again. We will * know this if last reset request did not occur very recently (watchdog * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) @@ -2831,6 +2509,9 @@ static void hclge_reset_event(struct hnae3_handle *handle) * want to make sure we throttle the reset request. Therefore, we will * not allow it again before 3*HZ times. */ + if (!handle) + handle = &hdev->vport[0].nic; + if (time_before(jiffies, (handle->last_reset_time + 3 * HZ))) return; else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) @@ -3102,6 +2783,22 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, return ret; } +static void hclge_get_rss_type(struct hclge_vport *vport) +{ + if (vport->rss_tuple_sets.ipv4_tcp_en || + vport->rss_tuple_sets.ipv4_udp_en || + vport->rss_tuple_sets.ipv4_sctp_en || + vport->rss_tuple_sets.ipv6_tcp_en || + vport->rss_tuple_sets.ipv6_udp_en || + vport->rss_tuple_sets.ipv6_sctp_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; + else if (vport->rss_tuple_sets.ipv4_fragment_en || + vport->rss_tuple_sets.ipv6_fragment_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; + else + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; +} + static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) { struct hclge_rss_input_tuple_cmd *req; @@ -3121,6 +2818,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; + hclge_get_rss_type(&hdev->vport[0]); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, @@ -3135,8 +2833,19 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, int i; /* Get hash algorithm */ - if (hfunc) - *hfunc = vport->rss_algo; + if (hfunc) { + switch (vport->rss_algo) { + case HCLGE_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGE_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } + } /* Get the RSS Key required by the user */ if (key) @@ -3160,12 +2869,20 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, /* Set the RSS Hash Key if specififed by the user */ if (key) { - - if (hfunc == ETH_RSS_HASH_TOP || - hfunc == ETH_RSS_HASH_NO_CHANGE) + switch (hfunc) { + case ETH_RSS_HASH_TOP: hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; - else + break; + case ETH_RSS_HASH_XOR: + hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; + break; + case 
ETH_RSS_HASH_NO_CHANGE: + hash_algo = vport->rss_algo; + break; + default: return -EINVAL; + } + ret = hclge_set_rss_algo_key(hdev, hash_algo, key); if (ret) return ret; @@ -3283,6 +3000,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + hclge_get_rss_type(vport); return 0; } @@ -3608,6 +3326,1281 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, hclge_cmd_set_promisc_mode(hdev, ¶m); } +static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) +{ + struct hclge_get_fd_mode_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); + + req = (struct hclge_get_fd_mode_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); + return ret; + } + + *fd_mode = req->mode; + + return ret; +} + +static int hclge_get_fd_allocation(struct hclge_dev *hdev, + u32 *stage1_entry_num, + u32 *stage2_entry_num, + u16 *stage1_counter_num, + u16 *stage2_counter_num) +{ + struct hclge_get_fd_allocation_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); + + req = (struct hclge_get_fd_allocation_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", + ret); + return ret; + } + + *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); + *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); + *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); + *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); + + return ret; +} + +static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) +{ + struct hclge_set_fd_key_config_cmd *req; + struct hclge_fd_key_cfg *stage; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); + + req = (struct hclge_set_fd_key_config_cmd *)desc.data; + stage = &hdev->fd_cfg.key_cfg[stage_num]; + req->stage = stage_num; + req->key_select = stage->key_sel; + req->inner_sipv6_word_en = stage->inner_sipv6_word_en; + req->inner_dipv6_word_en = stage->inner_dipv6_word_en; + req->outer_sipv6_word_en = stage->outer_sipv6_word_en; + req->outer_dipv6_word_en = stage->outer_dipv6_word_en; + req->tuple_mask = cpu_to_le32(~stage->tuple_active); + req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); + + return ret; +} + +static int hclge_init_fd_config(struct hclge_dev *hdev) +{ +#define LOW_2_WORDS 0x03 + struct hclge_fd_key_cfg *key_cfg; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return 0; + + ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); + if (ret) + return ret; + + switch (hdev->fd_cfg.fd_mode) { + case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; + break; + case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; + break; + default: + dev_err(&hdev->pdev->dev, + "Unsupported flow director mode %d\n", + hdev->fd_cfg.fd_mode); + return -EOPNOTSUPP; + } + + hdev->fd_cfg.fd_en = true; + hdev->fd_cfg.proto_support = + TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | + 
UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; + key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; + key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, + key_cfg->inner_sipv6_word_en = LOW_2_WORDS; + key_cfg->inner_dipv6_word_en = LOW_2_WORDS; + key_cfg->outer_sipv6_word_en = 0; + key_cfg->outer_dipv6_word_en = 0; + + key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | + BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + /* If use max 400bit key, we can support tuples for ether type */ + if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { + hdev->fd_cfg.proto_support |= ETHER_FLOW; + key_cfg->tuple_active |= + BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); + } + + /* roce_type is used to filter roce frames + * dst_vport is used to specify the rule + */ + key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); + + ret = hclge_get_fd_allocation(hdev, + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); + if (ret) + return ret; + + return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); +} + +static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, + int loc, u8 *key, bool is_add) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = stage; + req1->xy_sel = sel_x ? 1 : 0; + hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); + req1->index = cpu_to_le32(loc); + req1->entry_vld = sel_x ? 
is_add : 0; + + if (key) { + memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); + memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], + sizeof(req2->tcam_data)); + memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + + sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); + } + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + dev_err(&hdev->pdev->dev, + "config tcam key fail, ret=%d\n", + ret); + + return ret; +} + +static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, + struct hclge_fd_ad_data *action) +{ + struct hclge_fd_ad_config_cmd *req; + struct hclge_desc desc; + u64 ad_data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); + + req = (struct hclge_fd_ad_config_cmd *)desc.data; + req->index = cpu_to_le32(loc); + req->stage = stage; + + hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, + action->write_rule_id_to_bd); + hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, + action->rule_id); + ad_data <<= 32; + hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); + hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, + action->forward_to_direct_queue); + hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, + action->queue_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); + hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, + HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); + hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, + action->counter_id); + + req->ad_data = cpu_to_le64(ad_data); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); + + return ret; +} + +static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, + struct hclge_fd_rule *rule) +{ + u16 tmp_x_s, tmp_y_s; + u32 tmp_x_l, tmp_y_l; + int i; + + if (rule->unused_tuple & tuple_bit) + return true; + + switch (tuple_bit) { + case 0: + return false; + case BIT(INNER_DST_MAC): + for (i = 0; i < 6; i++) { + calc_x(key_x[5 - i], rule->tuples.dst_mac[i], + rule->tuples_mask.dst_mac[i]); + calc_y(key_y[5 - i], rule->tuples.dst_mac[i], + rule->tuples_mask.dst_mac[i]); + } + + return true; + case BIT(INNER_SRC_MAC): + for (i = 0; i < 6; i++) { + calc_x(key_x[5 - i], rule->tuples.src_mac[i], + rule->tuples.src_mac[i]); + calc_y(key_y[5 - i], rule->tuples.src_mac[i], + rule->tuples.src_mac[i]); + } + + return true; + case BIT(INNER_VLAN_TAG_FST): + calc_x(tmp_x_s, rule->tuples.vlan_tag1, + rule->tuples_mask.vlan_tag1); + calc_y(tmp_y_s, rule->tuples.vlan_tag1, + rule->tuples_mask.vlan_tag1); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_ETH_TYPE): + calc_x(tmp_x_s, rule->tuples.ether_proto, + rule->tuples_mask.ether_proto); + calc_y(tmp_y_s, rule->tuples.ether_proto, + rule->tuples_mask.ether_proto); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_IP_TOS): + calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); + calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); + + return true; + case BIT(INNER_IP_PROTO): + calc_x(*key_x, rule->tuples.ip_proto, + rule->tuples_mask.ip_proto); + calc_y(*key_y, rule->tuples.ip_proto, + rule->tuples_mask.ip_proto); + + return true; + case BIT(INNER_SRC_IP): + calc_x(tmp_x_l, rule->tuples.src_ip[3], + rule->tuples_mask.src_ip[3]); + 
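The hclge_fd_convert_tuple() cases above feed each value/mask pair through calc_x()/calc_y() to build the key_x/key_y halves that the flow-director TCAM consumes; those macros are defined elsewhere in hclge_main.c and are not part of this hunk. The sketch below shows the conventional x/y encoding for a masked 16-bit field purely as an illustration of the idea, not as the driver's exact macros:

#include <stdint.h>
#include <stdio.h>

/* Assumed "x = data & mask, y = ~data & mask" TCAM convention: a masked-off
 * bit gives x = y = 0, which the TCAM treats as "don't care".
 */
static void tcam_xy_u16(uint16_t val, uint16_t mask, uint16_t *x, uint16_t *y)
{
        *x = val & mask;                /* bits that must be 1 */
        *y = (uint16_t)~val & mask;     /* bits that must be 0 */
}

int main(void)
{
        uint16_t x, y;

        tcam_xy_u16(0x0016 /* dst port 22 */, 0xffff /* exact match */, &x, &y);
        printf("x=0x%04x y=0x%04x\n", x, y);
        return 0;
}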
calc_y(tmp_y_l, rule->tuples.src_ip[3], + rule->tuples_mask.src_ip[3]); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case BIT(INNER_DST_IP): + calc_x(tmp_x_l, rule->tuples.dst_ip[3], + rule->tuples_mask.dst_ip[3]); + calc_y(tmp_y_l, rule->tuples.dst_ip[3], + rule->tuples_mask.dst_ip[3]); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case BIT(INNER_SRC_PORT): + calc_x(tmp_x_s, rule->tuples.src_port, + rule->tuples_mask.src_port); + calc_y(tmp_y_s, rule->tuples.src_port, + rule->tuples_mask.src_port); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_DST_PORT): + calc_x(tmp_x_s, rule->tuples.dst_port, + rule->tuples_mask.dst_port); + calc_y(tmp_y_s, rule->tuples.dst_port, + rule->tuples_mask.dst_port); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + default: + return false; + } +} + +static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, + u8 vf_id, u8 network_port_id) +{ + u32 port_number = 0; + + if (port_type == HOST_PORT) { + hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, + pf_id); + hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, + vf_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); + } else { + hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, + HCLGE_NETWORK_PORT_ID_S, network_port_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); + } + + return port_number; +} + +static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, + __le32 *key_x, __le32 *key_y, + struct hclge_fd_rule *rule) +{ + u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; + u8 cur_pos = 0, tuple_size, shift_bits; + int i; + + for (i = 0; i < MAX_META_DATA; i++) { + tuple_size = meta_data_key_info[i].key_length; + tuple_bit = key_cfg->meta_data_active & BIT(i); + + switch (tuple_bit) { + case BIT(ROCE_TYPE): + hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); + cur_pos += tuple_size; + break; + case BIT(DST_VPORT): + port_number = hclge_get_port_number(HOST_PORT, 0, + rule->vf_id, 0); + hnae3_set_field(meta_data, + GENMASK(cur_pos + tuple_size, cur_pos), + cur_pos, port_number); + cur_pos += tuple_size; + break; + default: + break; + } + } + + calc_x(tmp_x, meta_data, 0xFFFFFFFF); + calc_y(tmp_y, meta_data, 0xFFFFFFFF); + shift_bits = sizeof(meta_data) * 8 - cur_pos; + + *key_x = cpu_to_le32(tmp_x << shift_bits); + *key_y = cpu_to_le32(tmp_y << shift_bits); +} + +/* A complete key is combined with meta data key and tuple key. + * Meta data key is stored at the MSB region, and tuple key is stored at + * the LSB region, unused bits will be filled 0. 
+ */ +static int hclge_config_key(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; + u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; + u8 *cur_key_x, *cur_key_y; + int i, ret, tuple_size; + u8 meta_data_region; + + memset(key_x, 0, sizeof(key_x)); + memset(key_y, 0, sizeof(key_y)); + cur_key_x = key_x; + cur_key_y = key_y; + + for (i = 0 ; i < MAX_TUPLE; i++) { + bool tuple_valid; + u32 check_tuple; + + tuple_size = tuple_key_info[i].key_length / 8; + check_tuple = key_cfg->tuple_active & BIT(i); + + tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, + cur_key_y, rule); + if (tuple_valid) { + cur_key_x += tuple_size; + cur_key_y += tuple_size; + } + } + + meta_data_region = hdev->fd_cfg.max_key_length / 8 - + MAX_META_DATA_LENGTH / 8; + + hclge_fd_convert_meta_data(key_cfg, + (__le32 *)(key_x + meta_data_region), + (__le32 *)(key_y + meta_data_region), + rule); + + ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, + true); + if (ret) { + dev_err(&hdev->pdev->dev, + "fd key_y config fail, loc=%d, ret=%d\n", + rule->queue_id, ret); + return ret; + } + + ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, + true); + if (ret) + dev_err(&hdev->pdev->dev, + "fd key_x config fail, loc=%d, ret=%d\n", + rule->queue_id, ret); + return ret; +} + +static int hclge_config_action(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_ad_data ad_data; + + ad_data.ad_id = rule->location; + + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + ad_data.drop_packet = true; + ad_data.forward_to_direct_queue = false; + ad_data.queue_id = 0; + } else { + ad_data.drop_packet = false; + ad_data.forward_to_direct_queue = true; + ad_data.queue_id = rule->queue_id; + } + + ad_data.use_counter = false; + ad_data.counter_id = 0; + + ad_data.use_next_stage = false; + ad_data.next_input_key = 0; + + ad_data.write_rule_id_to_bd = true; + ad_data.rule_id = rule->location; + + return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); +} + +static int hclge_fd_check_spec(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, u32 *unused) +{ + struct ethtool_tcpip4_spec *tcp_ip4_spec; + struct ethtool_usrip4_spec *usr_ip4_spec; + struct ethtool_tcpip6_spec *tcp_ip6_spec; + struct ethtool_usrip6_spec *usr_ip6_spec; + struct ethhdr *ether_spec; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + if (!(fs->flow_type & hdev->fd_cfg.proto_support)) + return -EOPNOTSUPP; + + if ((fs->flow_type & FLOW_EXT) && + (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); + return -EOPNOTSUPP; + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); + + if (!tcp_ip4_spec->ip4src) + *unused |= BIT(INNER_SRC_IP); + + if (!tcp_ip4_spec->ip4dst) + *unused |= BIT(INNER_DST_IP); + + if (!tcp_ip4_spec->psrc) + *unused |= BIT(INNER_SRC_PORT); + + if (!tcp_ip4_spec->pdst) + *unused |= BIT(INNER_DST_PORT); + + if (!tcp_ip4_spec->tos) + *unused |= BIT(INNER_IP_TOS); + + break; + case IP_USER_FLOW: + usr_ip4_spec = &fs->h_u.usr_ip4_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + if (!usr_ip4_spec->ip4src) + *unused |= BIT(INNER_SRC_IP); + + if 
(!usr_ip4_spec->ip4dst) + *unused |= BIT(INNER_DST_IP); + + if (!usr_ip4_spec->tos) + *unused |= BIT(INNER_IP_TOS); + + if (!usr_ip4_spec->proto) + *unused |= BIT(INNER_IP_PROTO); + + if (usr_ip4_spec->l4_4_bytes) + return -EOPNOTSUPP; + + if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) + return -EOPNOTSUPP; + + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS); + + if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && + !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) + *unused |= BIT(INNER_SRC_IP); + + if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && + !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) + *unused |= BIT(INNER_DST_IP); + + if (!tcp_ip6_spec->psrc) + *unused |= BIT(INNER_SRC_PORT); + + if (!tcp_ip6_spec->pdst) + *unused |= BIT(INNER_DST_PORT); + + if (tcp_ip6_spec->tclass) + return -EOPNOTSUPP; + + break; + case IPV6_USER_FLOW: + usr_ip6_spec = &fs->h_u.usr_ip6_spec; + *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | + BIT(INNER_DST_PORT); + + if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && + !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) + *unused |= BIT(INNER_SRC_IP); + + if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && + !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) + *unused |= BIT(INNER_DST_IP); + + if (!usr_ip6_spec->l4_proto) + *unused |= BIT(INNER_IP_PROTO); + + if (usr_ip6_spec->tclass) + return -EOPNOTSUPP; + + if (usr_ip6_spec->l4_4_bytes) + return -EOPNOTSUPP; + + break; + case ETHER_FLOW: + ether_spec = &fs->h_u.ether_spec; + *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | + BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); + + if (is_zero_ether_addr(ether_spec->h_source)) + *unused |= BIT(INNER_SRC_MAC); + + if (is_zero_ether_addr(ether_spec->h_dest)) + *unused |= BIT(INNER_DST_MAC); + + if (!ether_spec->h_proto) + *unused |= BIT(INNER_ETH_TYPE); + + break; + default: + return -EOPNOTSUPP; + } + + if ((fs->flow_type & FLOW_EXT)) { + if (fs->h_ext.vlan_etype) + return -EOPNOTSUPP; + if (!fs->h_ext.vlan_tci) + *unused |= BIT(INNER_VLAN_TAG_FST); + + if (fs->m_ext.vlan_tci) { + if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + } + } else { + *unused |= BIT(INNER_VLAN_TAG_FST); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) + return -EOPNOTSUPP; + + if (is_zero_ether_addr(fs->h_ext.h_dest)) + *unused |= BIT(INNER_DST_MAC); + else + *unused &= ~(BIT(INNER_DST_MAC)); + } + + return 0; +} + +static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location >= location) + break; + } + + return rule && rule->location == location; +} + +static int hclge_fd_update_rule_list(struct hclge_dev *hdev, + struct hclge_fd_rule *new_rule, + u16 location, + bool is_add) +{ + struct hclge_fd_rule *rule = NULL, *parent = NULL; + struct hlist_node *node2; + + if (is_add && !new_rule) + return -EINVAL; + + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (rule->location >= location) + break; + parent = rule; + } + + if (rule && rule->location == location) { + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + + if 
(!is_add) + return 0; + + } else if (!is_add) { + dev_err(&hdev->pdev->dev, + "delete fail, rule %d is inexistent\n", + location); + return -EINVAL; + } + + INIT_HLIST_NODE(&new_rule->rule_node); + + if (parent) + hlist_add_behind(&new_rule->rule_node, &parent->rule_node); + else + hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); + + hdev->hclge_fd_rule_num++; + + return 0; +} + +static int hclge_fd_get_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + + switch (flow_type) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + rule->tuples.src_ip[3] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); + rule->tuples_mask.src_ip[3] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); + + rule->tuples.dst_ip[3] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[3] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); + rule->tuples_mask.src_port = + be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); + rule->tuples_mask.dst_port = + be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); + + rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case IP_USER_FLOW: + rule->tuples.src_ip[3] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); + rule->tuples_mask.src_ip[3] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); + + rule->tuples.dst_ip[3] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[3] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); + + rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; + + rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + be32_to_cpu_array(rule->tuples.src_ip, + fs->h_u.tcp_ip6_spec.ip6src, 4); + be32_to_cpu_array(rule->tuples_mask.src_ip, + fs->m_u.tcp_ip6_spec.ip6src, 4); + + be32_to_cpu_array(rule->tuples.dst_ip, + fs->h_u.tcp_ip6_spec.ip6dst, 4); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + fs->m_u.tcp_ip6_spec.ip6dst, 4); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); + rule->tuples_mask.src_port = + be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); + rule->tuples_mask.dst_port = + be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case IPV6_USER_FLOW: + be32_to_cpu_array(rule->tuples.src_ip, + fs->h_u.usr_ip6_spec.ip6src, 4); + be32_to_cpu_array(rule->tuples_mask.src_ip, + fs->m_u.usr_ip6_spec.ip6src, 4); + + be32_to_cpu_array(rule->tuples.dst_ip, + fs->h_u.usr_ip6_spec.ip6dst, 4); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + fs->m_u.usr_ip6_spec.ip6dst, 4); + + rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case ETHER_FLOW: + ether_addr_copy(rule->tuples.src_mac, + fs->h_u.ether_spec.h_source); + ether_addr_copy(rule->tuples_mask.src_mac, + fs->m_u.ether_spec.h_source); 
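hclge_fd_update_rule_list() above keeps flow-director rules on a list sorted by location, replacing an entry in place when that slot is already used and otherwise inserting behind the last smaller node. A simplified userspace analogue of that insert-or-replace walk, using plain pointers instead of the kernel hlist API:

#include <stdio.h>
#include <stdlib.h>

struct fd_rule {
        unsigned int location;
        struct fd_rule *next;
};

/* Insert new_rule into a list kept sorted by ->location; an existing rule
 * with the same location is replaced.  Returns the (possibly new) head.
 */
static struct fd_rule *rule_list_update(struct fd_rule *head,
                                        struct fd_rule *new_rule)
{
        struct fd_rule *cur = head, *parent = NULL;

        while (cur && cur->location < new_rule->location) {
                parent = cur;
                cur = cur->next;
        }

        if (cur && cur->location == new_rule->location) {
                /* same slot: unlink and free the old entry */
                new_rule->next = cur->next;
                free(cur);
        } else {
                new_rule->next = cur;
        }

        if (parent) {
                parent->next = new_rule;
                return head;
        }
        return new_rule;
}

int main(void)
{
        struct fd_rule *head = NULL;
        unsigned int locs[] = { 5, 1, 5 };      /* the second 5 replaces the first */

        for (size_t i = 0; i < sizeof(locs) / sizeof(locs[0]); i++) {
                struct fd_rule *r = calloc(1, sizeof(*r));

                r->location = locs[i];
                head = rule_list_update(head, r);
        }

        for (struct fd_rule *r = head; r; r = r->next)
                printf("rule at %u\n", r->location);
        return 0;
}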
+ + ether_addr_copy(rule->tuples.dst_mac, + fs->h_u.ether_spec.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, + fs->m_u.ether_spec.h_dest); + + rule->tuples.ether_proto = + be16_to_cpu(fs->h_u.ether_spec.h_proto); + rule->tuples_mask.ether_proto = + be16_to_cpu(fs->m_u.ether_spec.h_proto); + + break; + default: + return -EOPNOTSUPP; + } + + switch (flow_type) { + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_SCTP; + rule->tuples_mask.ip_proto = 0xFF; + break; + case TCP_V4_FLOW: + case TCP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_TCP; + rule->tuples_mask.ip_proto = 0xFF; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_UDP; + rule->tuples_mask.ip_proto = 0xFF; + break; + default: + break; + } + + if ((fs->flow_type & FLOW_EXT)) { + rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); + rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); + } + + return 0; +} + +static int hclge_add_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u16 dst_vport_id = 0, q_index = 0; + struct ethtool_rx_flow_spec *fs; + struct hclge_fd_rule *rule; + u32 unused = 0; + u8 action; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + if (!hdev->fd_cfg.fd_en) { + dev_warn(&hdev->pdev->dev, + "Please enable flow director first\n"); + return -EOPNOTSUPP; + } + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + ret = hclge_fd_check_spec(hdev, fs, &unused); + if (ret) { + dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); + return ret; + } + + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + action = HCLGE_FD_ACTION_DROP_PACKET; + } else { + u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + u16 tqps; + + dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; + tqps = vf ? 
hdev->vport[vf].alloc_tqps : vport->alloc_tqps; + + if (ring >= tqps) { + dev_err(&hdev->pdev->dev, + "Error: queue id (%d) > max tqp num (%d)\n", + ring, tqps - 1); + return -EINVAL; + } + + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "Error: vf id (%d) > max vf num (%d)\n", + vf, hdev->num_req_vfs); + return -EINVAL; + } + + action = HCLGE_FD_ACTION_ACCEPT_PACKET; + q_index = ring; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_fd_get_tuple(hdev, fs, rule); + if (ret) + goto free_rule; + + rule->flow_type = fs->flow_type; + + rule->location = fs->location; + rule->unused_tuple = unused; + rule->vf_id = dst_vport_id; + rule->queue_id = q_index; + rule->action = action; + + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + goto free_rule; + + ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + goto free_rule; + + ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); + if (ret) + goto free_rule; + + return ret; + +free_rule: + kfree(rule); + return ret; +} + +static int hclge_del_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + if (!hclge_fd_rule_exist(hdev, fs->location)) { + dev_err(&hdev->pdev->dev, + "Delete fail, rule %d is inexistent\n", + fs->location); + return -ENOENT; + } + + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + fs->location, NULL, false); + if (ret) + return ret; + + return hclge_fd_update_rule_list(hdev, NULL, fs->location, + false); +} + +static void hclge_del_all_fd_entries(struct hnae3_handle *handle, + bool clear_list) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + + if (!hnae3_dev_fd_supported(hdev)) + return; + + if (clear_list) { + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, + rule_node) { + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + } + } else { + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, + rule_node) + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + } +} + +static int hclge_restore_fd_entries(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (!ret) + ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); + + if (ret) { + dev_warn(&hdev->pdev->dev, + "Restore rule %d failed, remove it\n", + rule->location); + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + } + } + return 0; +} + +static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + 
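hclge_add_fd_entry() above unpacks fs->ring_cookie into a queue index and a VF index with the ethtool helpers, and hclge_get_fd_rule_info() later rebuilds the cookie by shifting the VF id by ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF. The layout (queue in the low 32 bits, VF id in the byte above them) follows the definitions in include/uapi/linux/ethtool.h; the sample cookie below is made up:

#include <stdint.h>
#include <stdio.h>

#define RING_MASK    0x00000000FFFFFFFFULL   /* ETHTOOL_RX_FLOW_SPEC_RING */
#define RING_VF_MASK 0x000000FF00000000ULL   /* ETHTOOL_RX_FLOW_SPEC_RING_VF */
#define RING_VF_OFF  32                      /* ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF */

int main(void)
{
        uint64_t ring_cookie = ((uint64_t)2 << RING_VF_OFF) | 5; /* VF 2, queue 5 */
        unsigned int ring = ring_cookie & RING_MASK;
        unsigned int vf = (ring_cookie & RING_VF_MASK) >> RING_VF_OFF;

        printf("vf=%u queue=%u\n", vf, ring);
        return 0;
}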
cmd->rule_cnt = hdev->hclge_fd_rule_num; + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + return 0; +} + +static int hclge_get_fd_rule_info(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_fd_rule *rule = NULL; + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + struct hlist_node *node2; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location >= fs->location) + break; + } + + if (!rule || fs->location != rule->location) + return -ENOENT; + + fs->flow_type = rule->flow_type; + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + fs->h_u.tcp_ip4_spec.ip4src = + cpu_to_be32(rule->tuples.src_ip[3]); + fs->m_u.tcp_ip4_spec.ip4src = + rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); + + fs->h_u.tcp_ip4_spec.ip4dst = + cpu_to_be32(rule->tuples.dst_ip[3]); + fs->m_u.tcp_ip4_spec.ip4dst = + rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); + + fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); + fs->m_u.tcp_ip4_spec.psrc = + rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); + fs->m_u.tcp_ip4_spec.pdst = + rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); + + fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; + fs->m_u.tcp_ip4_spec.tos = + rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + break; + case IP_USER_FLOW: + fs->h_u.usr_ip4_spec.ip4src = + cpu_to_be32(rule->tuples.src_ip[3]); + fs->m_u.tcp_ip4_spec.ip4src = + rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); + + fs->h_u.usr_ip4_spec.ip4dst = + cpu_to_be32(rule->tuples.dst_ip[3]); + fs->m_u.usr_ip4_spec.ip4dst = + rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); + + fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; + fs->m_u.usr_ip4_spec.tos = + rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; + fs->m_u.usr_ip4_spec.proto = + rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; + + fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, + rule->tuples.src_ip, 4); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, + rule->tuples_mask.src_ip, 4); + + cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, + rule->tuples.dst_ip, 4); + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, + rule->tuples_mask.dst_ip, 4); + + fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); + fs->m_u.tcp_ip6_spec.psrc = + rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); + fs->m_u.tcp_ip6_spec.pdst = + rule->unused_tuple & BIT(INNER_DST_PORT) ? 
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port); + + break; + case IPV6_USER_FLOW: + cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, + rule->tuples.src_ip, 4); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, + rule->tuples_mask.src_ip, 4); + + cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, + rule->tuples.dst_ip, 4); + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4); + else + cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, + rule->tuples_mask.dst_ip, 4); + + fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; + fs->m_u.usr_ip6_spec.l4_proto = + rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; + + break; + case ETHER_FLOW: + ether_addr_copy(fs->h_u.ether_spec.h_source, + rule->tuples.src_mac); + if (rule->unused_tuple & BIT(INNER_SRC_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_source); + else + ether_addr_copy(fs->m_u.ether_spec.h_source, + rule->tuples_mask.src_mac); + + ether_addr_copy(fs->h_u.ether_spec.h_dest, + rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + + fs->h_u.ether_spec.h_proto = + cpu_to_be16(rule->tuples.ether_proto); + fs->m_u.ether_spec.h_proto = + rule->unused_tuple & BIT(INNER_ETH_TYPE) ? + 0 : cpu_to_be16(rule->tuples_mask.ether_proto); + + break; + default: + return -EOPNOTSUPP; + } + + if (fs->flow_type & FLOW_EXT) { + fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); + fs->m_ext.vlan_tci = + rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? + cpu_to_be16(VLAN_VID_MASK) : + cpu_to_be16(rule->tuples_mask.vlan_tag1); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + } + + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + fs->ring_cookie = RX_CLS_FLOW_DISC; + } else { + u64 vf_id; + + fs->ring_cookie = rule->queue_id; + vf_id = rule->vf_id; + vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fs->ring_cookie |= vf_id; + } + + return 0; +} + +static int hclge_get_all_rules(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node2; + int cnt = 0; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[cnt] = rule->location; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hdev->fd_cfg.fd_en = enable; + if (!enable) + hclge_del_all_fd_entries(handle, false); + else + hclge_restore_fd_entries(handle); +} + static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) { struct hclge_desc desc; @@ -3639,7 +4632,7 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) "mac enable fail, ret =%d.\n", ret); } -static int hclge_set_mac_loopback(struct 
hclge_dev *hdev, bool en) +static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) { struct hclge_config_mac_mode_cmd *req; struct hclge_desc desc; @@ -3659,6 +4652,8 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) /* 2 Then setup the loopback flag */ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); @@ -3673,22 +4668,37 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en) +static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) { #define HCLGE_SERDES_RETRY_MS 10 #define HCLGE_SERDES_RETRY_NUM 100 struct hclge_serdes_lb_cmd *req; struct hclge_desc desc; int ret, i = 0; + u8 loop_mode_b; - req = (struct hclge_serdes_lb_cmd *)&desc.data[0]; + req = (struct hclge_serdes_lb_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); + switch (loop_mode) { + case HNAE3_LOOP_SERIAL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + break; + case HNAE3_LOOP_PARALLEL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; + break; + default: + dev_err(&hdev->pdev->dev, + "unsupported serdes loopback mode %d\n", loop_mode); + return -ENOTSUPP; + } + if (en) { - req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; - req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + req->enable = loop_mode_b; + req->mask = loop_mode_b; } else { - req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + req->mask = loop_mode_b; } ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -3719,33 +4729,10 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en) return -EIO; } + hclge_cfg_mac_mode(hdev, en); return 0; } -static int hclge_set_loopback(struct hnae3_handle *handle, - enum hnae3_loop loop_mode, bool en) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - int ret; - - switch (loop_mode) { - case HNAE3_MAC_INTER_LOOP_MAC: - ret = hclge_set_mac_loopback(hdev, en); - break; - case HNAE3_MAC_INTER_LOOP_SERDES: - ret = hclge_set_serdes_loopback(hdev, en); - break; - default: - ret = -ENOTSUPP; - dev_err(&hdev->pdev->dev, - "loop_mode %d is not supported\n", loop_mode); - break; - } - - return ret; -} - static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, int stream_id, bool enable) { @@ -3766,6 +4753,37 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, return ret; } +static int hclge_set_loopback(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i, ret; + + switch (loop_mode) { + case HNAE3_LOOP_APP: + ret = hclge_set_app_loopback(hdev, en); + break; + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + ret = hclge_set_serdes_loopback(hdev, en, loop_mode); + break; + default: + ret = -ENOTSUPP; + dev_err(&hdev->pdev->dev, + "loop_mode %d is not supported\n", loop_mode); + break; + } + + for (i = 0; i < vport->alloc_tqps; i++) { + ret = hclge_tqp_enable(hdev, i, 0, en); + if (ret) + return ret; + } + + return 0; +} + static void hclge_reset_tqp_stats(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -3809,6 +4827,8 @@ static void 
hclge_ae_stop(struct hnae3_handle *handle) struct hclge_dev *hdev = vport->back; int i; + set_bit(HCLGE_STATE_DOWN, &hdev->state); + del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); @@ -3950,174 +4970,6 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); } -static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, - const u8 *addr) -{ - u16 high_val = addr[1] | (addr[0] << 8); - struct hclge_dev *hdev = vport->back; - u32 rsh = 4 - hdev->mta_mac_sel_type; - u16 ret_val = (high_val >> rsh) & 0xfff; - - return ret_val; -} - -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable) -{ - struct hclge_mta_filter_mode_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_mta_filter_mode_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); - - hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, - enable); - hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, - HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config mat filter mode failed for cmd_send, ret =%d.\n", - ret); - - return ret; -} - -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable) -{ - struct hclge_cfg_func_mta_filter_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); - - hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, - enable); - req->function_id = func_id; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config func_id enable failed for cmd_send, ret =%d.\n", - ret); - - return ret; -} - -static int hclge_set_mta_table_item(struct hclge_vport *vport, - u16 idx, - bool enable) -{ - struct hclge_dev *hdev = vport->back; - struct hclge_cfg_func_mta_item_cmd *req; - struct hclge_desc desc; - u16 item_idx = 0; - int ret; - - req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); - hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); - - hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, - HCLGE_CFG_MTA_ITEM_IDX_S, idx); - req->item_idx = cpu_to_le16(item_idx); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Config mta table item failed for cmd_send, ret =%d.\n", - ret); - return ret; - } - - if (enable) - set_bit(idx, vport->mta_shadow); - else - clear_bit(idx, vport->mta_shadow); - - return 0; -} - -static int hclge_update_mta_status(struct hnae3_handle *handle) -{ - unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; - struct hclge_vport *vport = hclge_get_vport(handle); - struct net_device *netdev = handle->kinfo.netdev; - struct netdev_hw_addr *ha; - u16 tbl_idx; - - memset(mta_status, 0, sizeof(mta_status)); - - /* update mta_status from mc addr list */ - netdev_for_each_mc_addr(ha, netdev) { - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr); - set_bit(tbl_idx, mta_status); - } - - return hclge_update_mta_status_common(vport, mta_status, - 0, HCLGE_MTA_TBL_SIZE, true); -} - -int hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool 
update_filter) -{ - struct hclge_dev *hdev = vport->back; - u16 update_max = idx + count; - u16 check_max; - int ret = 0; - bool used; - u16 i; - - /* setup mta check range */ - if (update_filter) { - i = 0; - check_max = HCLGE_MTA_TBL_SIZE; - } else { - i = idx; - check_max = update_max; - } - - used = false; - /* check and update all mta item */ - for (; i < check_max; i++) { - /* ignore unused item */ - if (!test_bit(i, vport->mta_shadow)) - continue; - - /* if i in update range then update it */ - if (i >= idx && i < update_max) - if (!test_bit(i - idx, status)) - hclge_set_mta_table_item(vport, i, false); - - if (!used && test_bit(i, vport->mta_shadow)) - used = true; - } - - /* no longer use mta, disable it */ - if (vport->accept_mta_mc && update_filter && !used) { - ret = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - false); - if (ret) - dev_err(&hdev->pdev->dev, - "disable func mta filter fail ret=%d\n", - ret); - else - vport->accept_mta_mc = false; - } - - return ret; -} - static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd *req) { @@ -4241,6 +5093,118 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, return cfg_status; } +static int hclge_init_umv_space(struct hclge_dev *hdev) +{ + u16 allocated_size = 0; + int ret; + + ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, + true); + if (ret) + return ret; + + if (allocated_size < hdev->wanted_umv_size) + dev_warn(&hdev->pdev->dev, + "Alloc umv space failed, want %d, get %d\n", + hdev->wanted_umv_size, allocated_size); + + mutex_init(&hdev->umv_mutex); + hdev->max_umv_size = allocated_size; + hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_req_vfs + 2); + + return 0; +} + +static int hclge_uninit_umv_space(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->max_umv_size > 0) { + ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, + false); + if (ret) + return ret; + hdev->max_umv_size = 0; + } + mutex_destroy(&hdev->umv_mutex); + + return 0; +} + +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size, bool is_alloc) +{ + struct hclge_umv_spc_alc_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_umv_spc_alc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); + hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); + req->space_size = cpu_to_le32(space_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "%s umv space failed for cmd_send, ret =%d\n", + is_alloc ? 
"allocate" : "free", ret); + return ret; + } + + if (is_alloc && allocated_size) + *allocated_size = le32_to_cpu(desc.data[1]); + + return 0; +} + +static void hclge_reset_umv_space(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->used_umv_num = 0; + } + + mutex_lock(&hdev->umv_mutex); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_req_vfs + 2); + mutex_unlock(&hdev->umv_mutex); +} + +static bool hclge_is_umv_space_full(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + bool is_full; + + mutex_lock(&hdev->umv_mutex); + is_full = (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size == 0); + mutex_unlock(&hdev->umv_mutex); + + return is_full; +} + +static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) +{ + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->umv_mutex); + if (is_free) { + if (vport->used_umv_num > hdev->priv_umv_size) + hdev->share_umv_size++; + vport->used_umv_num--; + } else { + if (vport->used_umv_num >= hdev->priv_umv_size) + hdev->share_umv_size--; + vport->used_umv_num++; + } + mutex_unlock(&hdev->umv_mutex); +} + static int hclge_add_uc_addr(struct hnae3_handle *handle, const unsigned char *addr) { @@ -4286,8 +5250,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, * is not allowed in the mac vlan table. */ ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); - if (ret == -ENOENT) - return hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (ret == -ENOENT) { + if (!hclge_is_umv_space_full(vport)) { + ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (!ret) + hclge_update_umv_space(vport, false); + return ret; + } + + dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", + hdev->priv_umv_size); + + return -ENOSPC; + } /* check if we just hit the duplicate */ if (!ret) @@ -4330,6 +5305,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); ret = hclge_remove_mac_vlan_tbl(vport, &req); + if (!ret) + hclge_update_umv_space(vport, true); return ret; } @@ -4348,7 +5325,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; - u16 tbl_idx; int status; /* mac addr check */ @@ -4362,7 +5338,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4378,25 +5354,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } - /* If mc mac vlan table is full, use MTA table */ - if (status == -ENOSPC) { - if (!vport->accept_mta_mc) { - status = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - true); - if (status) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", - status); - return status; - } - vport->accept_mta_mc = true; - } - - /* Set MTA table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, 
tbl_idx, true); - } + if (status == -ENOSPC) + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); return status; } @@ -4429,7 +5388,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4598,8 +5557,20 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, return 0; } +static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, + int cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hdev->hw.mac.phydev) + return -EOPNOTSUPP; + + return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); +} + static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, - bool filter_en) + u8 fe_type, bool filter_en) { struct hclge_vlan_filter_ctrl_cmd *req; struct hclge_desc desc; @@ -4609,7 +5580,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; - req->vlan_fe = filter_en; + req->vlan_fe = filter_en ? fe_type : 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -4621,13 +5592,34 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, #define HCLGE_FILTER_TYPE_VF 0 #define HCLGE_FILTER_TYPE_PORT 1 +#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) +#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) +#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) +#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) +#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) +#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ + | HCLGE_FILTER_FE_ROCE_EGRESS_B) +#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ + | HCLGE_FILTER_FE_ROCE_INGRESS_B) static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); + if (hdev->pdev->revision >= 0x21) { + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, enable); + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, enable); + } else { + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, enable); + } + if (enable) + handle->netdev_flags |= HNAE3_VLAN_FLTR; + else + handle->netdev_flags &= ~HNAE3_VLAN_FLTR; } static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, @@ -4686,9 +5678,17 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, "Add vf vlan filter fail, ret =%d.\n", req0->resp_code); } else { +#define HCLGE_VF_VLAN_DEL_NO_FOUND 1 if (!req0->resp_code) return 0; + if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { + dev_warn(&hdev->pdev->dev, + "vlan %d filter is not in vf vlan table\n", + vlan); + return 0; + } + dev_err(&hdev->pdev->dev, "Kill vf vlan filter fail, ret =%d.\n", req0->resp_code); @@ -4732,6 +5732,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, u16 vport_idx, vport_num = 0; int ret; + if (is_kill && !vlan_id) + return 0; + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, 
vlan_id, 0, proto); if (ret) { @@ -4761,7 +5764,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, return -EINVAL; } - for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) + for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) vport_num++; if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) @@ -4896,7 +5899,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); - tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; + tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); @@ -4913,18 +5916,30 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) { #define HCLGE_DEF_VLAN_TYPE 0x8100 - struct hnae3_handle *handle; + struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_vport *vport; int ret; int i; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); - if (ret) - return ret; + if (hdev->pdev->revision >= 0x21) { + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, true); + if (ret) + return ret; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); - if (ret) - return ret; + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, true); + if (ret) + return ret; + } else { + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true); + if (ret) + return ret; + } + + handle->netdev_flags |= HNAE3_VLAN_FLTR; hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; @@ -4970,7 +5985,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return ret; } - handle = &hdev->vport[0].nic; return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } @@ -5187,20 +6201,6 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle) return hdev->fw_version; } -static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, - u32 *flowctrl_adv) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; - - if (!phydev) - return; - - *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | - (phydev->advertising & ADVERTISED_Asym_Pause); -} - static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) { struct phy_device *phydev = hdev->hw.mac.phydev; @@ -5208,13 +6208,7 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) if (!phydev) return; - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - - if (rx_en) - phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - - if (tx_en) - phydev->advertising ^= ADVERTISED_Asym_Pause; + phy_set_asym_pause(phydev, rx_en, tx_en); } static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) @@ -5256,11 +6250,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev) if (!phydev->link || !phydev->autoneg) return 0; - if (phydev->advertising & ADVERTISED_Pause) - local_advertising = ADVERTISE_PAUSE_CAP; - - if (phydev->advertising & ADVERTISED_Asym_Pause) - local_advertising |= ADVERTISE_PAUSE_ASYM; + local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising); if (phydev->pause) remote_advertising = LPA_PAUSE_CAP; @@ -5444,26 +6434,31 
@@ static int hclge_init_client_instance(struct hnae3_client *client, vport->nic.client = client; ret = client->ops->init_instance(&vport->nic); if (ret) - return ret; + goto clear_nic; ret = hclge_init_instance_hw(hdev); if (ret) { client->ops->uninit_instance(&vport->nic, 0); - return ret; + goto clear_nic; } + hnae3_set_client_init_flag(client, ae_dev, 1); + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; ret = hclge_init_roce_base_info(vport); if (ret) - return ret; + goto clear_roce; ret = rc->ops->init_instance(&vport->roce); if (ret) - return ret; + goto clear_roce; + + hnae3_set_client_init_flag(hdev->roce_client, + ae_dev, 1); } break; @@ -5473,7 +6468,9 @@ static int hclge_init_client_instance(struct hnae3_client *client, ret = client->ops->init_instance(&vport->nic); if (ret) - return ret; + goto clear_nic; + + hnae3_set_client_init_flag(client, ae_dev, 1); break; case HNAE3_CLIENT_ROCE: @@ -5485,16 +6482,31 @@ static int hclge_init_client_instance(struct hnae3_client *client, if (hdev->roce_client && hdev->nic_client) { ret = hclge_init_roce_base_info(vport); if (ret) - return ret; + goto clear_roce; ret = client->ops->init_instance(&vport->roce); if (ret) - return ret; + goto clear_roce; + + hnae3_set_client_init_flag(client, ae_dev, 1); } + + break; + default: + return -EINVAL; } } return 0; + +clear_nic: + hdev->nic_client = NULL; + vport->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + vport->roce.client = NULL; + return ret; } static void hclge_uninit_client_instance(struct hnae3_client *client, @@ -5514,7 +6526,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, } if (client->type == HNAE3_CLIENT_ROCE) return; - if (client->ops->uninit_instance) { + if (hdev->nic_client && client->ops->uninit_instance) { hclge_uninit_instance_hw(hdev); client->ops->uninit_instance(&vport->nic, 0); hdev->nic_client = NULL; @@ -5697,6 +6709,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) } } + ret = hclge_init_umv_space(hdev); + if (ret) { + dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); + goto err_msi_irq_uninit; + } + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -5734,6 +6752,20 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, + "fd table init fail, ret=%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_hw_error_set_state(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "hw error interrupts enable failed, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + hclge_dcb_ops_set(hdev); timer_setup(&hdev->service_timer, hclge_service_timer, 0); @@ -5810,6 +6842,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + hclge_reset_umv_space(hdev); + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -5840,6 +6874,19 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, + "fd table init fail, ret=%d\n", ret); + return ret; + } + + /* Re-enable the TM hw error interrupts because + * they get disabled on core/global reset. 
+ */ + if (hclge_enable_tm_hw_error(hdev, true)) + dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n"); + dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -5856,10 +6903,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) if (mac->phydev) mdiobus_unregister(mac->mdio_bus); + hclge_uninit_umv_space(hdev); + /* Disable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, false); synchronize_irq(hdev->misc_vector.vector_irq); + hclge_hw_error_set_state(hdev, false); hclge_destroy_cmd_queue(&hdev->hw); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); @@ -5887,18 +6937,12 @@ static void hclge_get_channels(struct hnae3_handle *handle, } static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, - u16 *free_tqps, u16 *max_rss_size) + u16 *alloc_tqps, u16 *max_rss_size) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - u16 temp_tqps = 0; - int i; - for (i = 0; i < hdev->num_tqps; i++) { - if (!hdev->htqp[i].alloced) - temp_tqps++; - } - *free_tqps = temp_tqps; + *alloc_tqps = vport->alloc_tqps; *max_rss_size = hdev->rss_size_max; } @@ -6228,27 +7272,6 @@ static void hclge_get_link_mode(struct hnae3_handle *handle, } } -static void hclge_get_port_type(struct hnae3_handle *handle, - u8 *port_type) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - u8 media_type = hdev->hw.mac.media_type; - - switch (media_type) { - case HNAE3_MEDIA_TYPE_FIBER: - *port_type = PORT_FIBRE; - break; - case HNAE3_MEDIA_TYPE_COPPER: - *port_type = PORT_TP; - break; - case HNAE3_MEDIA_TYPE_UNKNOWN: - default: - *port_type = PORT_OTHER; - break; - } -} - static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -6276,11 +7299,11 @@ static const struct hnae3_ae_ops hclge_ops = { .get_tc_size = hclge_get_tc_size, .get_mac_addr = hclge_get_mac_addr, .set_mac_addr = hclge_set_mac_addr, + .do_ioctl = hclge_do_ioctl, .add_uc_addr = hclge_add_uc_addr, .rm_uc_addr = hclge_rm_uc_addr, .add_mc_addr = hclge_add_mc_addr, .rm_mc_addr = hclge_rm_mc_addr, - .update_mta_status = hclge_update_mta_status, .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, .get_pauseparam = hclge_get_pauseparam, @@ -6301,12 +7324,19 @@ static const struct hnae3_ae_ops hclge_ops = { .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, .set_channels = hclge_set_channels, .get_channels = hclge_get_channels, - .get_flowctrl_adv = hclge_get_flowctrl_adv, .get_regs_len = hclge_get_regs_len, .get_regs = hclge_get_regs, .set_led_id = hclge_set_led_id, .get_link_mode = hclge_get_link_mode, - .get_port_type = hclge_get_port_type, + .add_fd_entry = hclge_add_fd_entry, + .del_fd_entry = hclge_del_fd_entry, + .del_all_fd_entries = hclge_del_all_fd_entries, + .get_fd_rule_cnt = hclge_get_fd_rule_cnt, + .get_fd_rule_info = hclge_get_fd_rule_info, + .get_fd_all_rules = hclge_get_all_rules, + .restore_fd_rules = hclge_restore_fd_entries, + .enable_fd = hclge_enable_fd, + .process_hw_error = hclge_process_ras_hw_error, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 1528fb3fa6be..e3dfd654eca9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -14,6 +14,8 @@ #define HCLGE_MOD_VERSION "1.0" #define 
HCLGE_DRIVER_NAME "hclge" +#define HCLGE_MAX_PF_NUM 8 + #define HCLGE_INVALID_VPORT 0xffff #define HCLGE_PF_CFG_BLOCK_SIZE 32 @@ -53,7 +55,9 @@ #define HCLGE_RSS_TC_SIZE_6 64 #define HCLGE_RSS_TC_SIZE_7 128 -#define HCLGE_MTA_TBL_SIZE 4096 +#define HCLGE_UMV_TBL_SIZE 3072 +#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ + (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) #define HCLGE_TQP_RESET_TRY_TIMES 10 @@ -79,6 +83,19 @@ #define HCLGE_VF_NUM_PER_CMD 64 #define HCLGE_VF_NUM_PER_BYTE 8 +enum HLCGE_PORT_TYPE { + HOST_PORT, + NETWORK_PORT +}; + +#define HCLGE_PF_ID_S 0 +#define HCLGE_PF_ID_M GENMASK(2, 0) +#define HCLGE_VF_ID_S 3 +#define HCLGE_VF_ID_M GENMASK(10, 3) +#define HCLGE_PORT_TYPE_B 11 +#define HCLGE_NETWORK_PORT_ID_S 0 +#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0) + /* Reset related Registers */ #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_MISC_VECTOR_INT_STS 0x20800 @@ -149,13 +166,6 @@ enum HCLGE_MAC_DUPLEX { HCLGE_MAC_FULL }; -enum hclge_mta_dmac_sel_type { - HCLGE_MAC_ADDR_47_36, - HCLGE_MAC_ADDR_46_35, - HCLGE_MAC_ADDR_45_34, - HCLGE_MAC_ADDR_44_33, -}; - struct hclge_mac { u8 phy_addr; u8 flag; @@ -238,6 +248,7 @@ struct hclge_cfg { u8 default_speed; u32 numa_node_map; u8 speed_ability; + u16 umv_space; }; struct hclge_tm_info { @@ -256,109 +267,6 @@ struct hclge_comm_stats_str { unsigned long offset; }; -/* all 64bit stats, opcode id: 0x0030 */ -struct hclge_64_bit_stats { - /* query_igu_stat */ - u64 igu_rx_oversize_pkt; - u64 igu_rx_undersize_pkt; - u64 igu_rx_out_all_pkt; - u64 igu_rx_uni_pkt; - u64 igu_rx_multi_pkt; - u64 igu_rx_broad_pkt; - u64 rsv0; - - /* query_egu_stat */ - u64 egu_tx_out_all_pkt; - u64 egu_tx_uni_pkt; - u64 egu_tx_multi_pkt; - u64 egu_tx_broad_pkt; - - /* ssu_ppp packet stats */ - u64 ssu_ppp_mac_key_num; - u64 ssu_ppp_host_key_num; - u64 ppp_ssu_mac_rlt_num; - u64 ppp_ssu_host_rlt_num; - - /* ssu_tx_in_out_dfx_stats */ - u64 ssu_tx_in_num; - u64 ssu_tx_out_num; - /* ssu_rx_in_out_dfx_stats */ - u64 ssu_rx_in_num; - u64 ssu_rx_out_num; -}; - -/* all 32bit stats, opcode id: 0x0031 */ -struct hclge_32_bit_stats { - u64 igu_rx_err_pkt; - u64 igu_rx_no_eof_pkt; - u64 igu_rx_no_sof_pkt; - u64 egu_tx_1588_pkt; - u64 egu_tx_err_pkt; - u64 ssu_full_drop_num; - u64 ssu_part_drop_num; - u64 ppp_key_drop_num; - u64 ppp_rlt_drop_num; - u64 ssu_key_drop_num; - u64 pkt_curr_buf_cnt; - u64 qcn_fb_rcv_cnt; - u64 qcn_fb_drop_cnt; - u64 qcn_fb_invaild_cnt; - u64 rsv0; - u64 rx_packet_tc0_in_cnt; - u64 rx_packet_tc1_in_cnt; - u64 rx_packet_tc2_in_cnt; - u64 rx_packet_tc3_in_cnt; - u64 rx_packet_tc4_in_cnt; - u64 rx_packet_tc5_in_cnt; - u64 rx_packet_tc6_in_cnt; - u64 rx_packet_tc7_in_cnt; - u64 rx_packet_tc0_out_cnt; - u64 rx_packet_tc1_out_cnt; - u64 rx_packet_tc2_out_cnt; - u64 rx_packet_tc3_out_cnt; - u64 rx_packet_tc4_out_cnt; - u64 rx_packet_tc5_out_cnt; - u64 rx_packet_tc6_out_cnt; - u64 rx_packet_tc7_out_cnt; - - /* Tx packet level statistics */ - u64 tx_packet_tc0_in_cnt; - u64 tx_packet_tc1_in_cnt; - u64 tx_packet_tc2_in_cnt; - u64 tx_packet_tc3_in_cnt; - u64 tx_packet_tc4_in_cnt; - u64 tx_packet_tc5_in_cnt; - u64 tx_packet_tc6_in_cnt; - u64 tx_packet_tc7_in_cnt; - u64 tx_packet_tc0_out_cnt; - u64 tx_packet_tc1_out_cnt; - u64 tx_packet_tc2_out_cnt; - u64 tx_packet_tc3_out_cnt; - u64 tx_packet_tc4_out_cnt; - u64 tx_packet_tc5_out_cnt; - u64 tx_packet_tc6_out_cnt; - u64 tx_packet_tc7_out_cnt; - - /* packet buffer statistics */ - u64 pkt_curr_buf_tc0_cnt; - u64 pkt_curr_buf_tc1_cnt; - u64 pkt_curr_buf_tc2_cnt; - u64 pkt_curr_buf_tc3_cnt; - u64 
pkt_curr_buf_tc4_cnt; - u64 pkt_curr_buf_tc5_cnt; - u64 pkt_curr_buf_tc6_cnt; - u64 pkt_curr_buf_tc7_cnt; - - u64 mb_uncopy_num; - u64 lo_pri_unicast_rlt_drop_num; - u64 hi_pri_multicast_rlt_drop_num; - u64 lo_pri_multicast_rlt_drop_num; - u64 rx_oq_drop_pkt_cnt; - u64 tx_oq_drop_pkt_cnt; - u64 nic_l2_err_drop_pkt_cnt; - u64 roc_l2_err_drop_pkt_cnt; -}; - /* mac stats ,opcode id: 0x0032 */ struct hclge_mac_stats { u64 mac_tx_mac_pause_num; @@ -450,8 +358,6 @@ struct hclge_mac_stats { #define HCLGE_STATS_TIMER_INTERVAL (60 * 5) struct hclge_hw_stats { struct hclge_mac_stats mac_stats; - struct hclge_64_bit_stats all_64_bit_stats; - struct hclge_32_bit_stats all_32_bit_stats; u32 stats_timer; }; @@ -464,6 +370,221 @@ struct hclge_vlan_type_cfg { u16 tx_in_vlan_type; }; +enum HCLGE_FD_MODE { + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1, + HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2, + HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1, + HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2, +}; + +enum HCLGE_FD_KEY_TYPE { + HCLGE_FD_KEY_BASE_ON_PTYPE, + HCLGE_FD_KEY_BASE_ON_TUPLE, +}; + +enum HCLGE_FD_STAGE { + HCLGE_FD_STAGE_1, + HCLGE_FD_STAGE_2, +}; + +/* OUTER_XXX indicates tuples in tunnel header of tunnel packet + * INNER_XXX indicate tuples in tunneled header of tunnel packet or + * tuples of non-tunnel packet + */ +enum HCLGE_FD_TUPLE { + OUTER_DST_MAC, + OUTER_SRC_MAC, + OUTER_VLAN_TAG_FST, + OUTER_VLAN_TAG_SEC, + OUTER_ETH_TYPE, + OUTER_L2_RSV, + OUTER_IP_TOS, + OUTER_IP_PROTO, + OUTER_SRC_IP, + OUTER_DST_IP, + OUTER_L3_RSV, + OUTER_SRC_PORT, + OUTER_DST_PORT, + OUTER_L4_RSV, + OUTER_TUN_VNI, + OUTER_TUN_FLOW_ID, + INNER_DST_MAC, + INNER_SRC_MAC, + INNER_VLAN_TAG_FST, + INNER_VLAN_TAG_SEC, + INNER_ETH_TYPE, + INNER_L2_RSV, + INNER_IP_TOS, + INNER_IP_PROTO, + INNER_SRC_IP, + INNER_DST_IP, + INNER_L3_RSV, + INNER_SRC_PORT, + INNER_DST_PORT, + INNER_L4_RSV, + MAX_TUPLE, +}; + +enum HCLGE_FD_META_DATA { + PACKET_TYPE_ID, + IP_FRAGEMENT, + ROCE_TYPE, + NEXT_KEY, + VLAN_NUMBER, + SRC_VPORT, + DST_VPORT, + TUNNEL_PACKET, + MAX_META_DATA, +}; + +struct key_info { + u8 key_type; + u8 key_length; +}; + +static const struct key_info meta_data_key_info[] = { + { PACKET_TYPE_ID, 6}, + { IP_FRAGEMENT, 1}, + { ROCE_TYPE, 1}, + { NEXT_KEY, 5}, + { VLAN_NUMBER, 2}, + { SRC_VPORT, 12}, + { DST_VPORT, 12}, + { TUNNEL_PACKET, 1}, +}; + +static const struct key_info tuple_key_info[] = { + { OUTER_DST_MAC, 48}, + { OUTER_SRC_MAC, 48}, + { OUTER_VLAN_TAG_FST, 16}, + { OUTER_VLAN_TAG_SEC, 16}, + { OUTER_ETH_TYPE, 16}, + { OUTER_L2_RSV, 16}, + { OUTER_IP_TOS, 8}, + { OUTER_IP_PROTO, 8}, + { OUTER_SRC_IP, 32}, + { OUTER_DST_IP, 32}, + { OUTER_L3_RSV, 16}, + { OUTER_SRC_PORT, 16}, + { OUTER_DST_PORT, 16}, + { OUTER_L4_RSV, 32}, + { OUTER_TUN_VNI, 24}, + { OUTER_TUN_FLOW_ID, 8}, + { INNER_DST_MAC, 48}, + { INNER_SRC_MAC, 48}, + { INNER_VLAN_TAG_FST, 16}, + { INNER_VLAN_TAG_SEC, 16}, + { INNER_ETH_TYPE, 16}, + { INNER_L2_RSV, 16}, + { INNER_IP_TOS, 8}, + { INNER_IP_PROTO, 8}, + { INNER_SRC_IP, 32}, + { INNER_DST_IP, 32}, + { INNER_L3_RSV, 16}, + { INNER_SRC_PORT, 16}, + { INNER_DST_PORT, 16}, + { INNER_L4_RSV, 32}, +}; + +#define MAX_KEY_LENGTH 400 +#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4) +#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4) +#define MAX_META_DATA_LENGTH 32 + +enum HCLGE_FD_PACKET_TYPE { + NIC_PACKET, + ROCE_PACKET, +}; + +enum HCLGE_FD_ACTION { + HCLGE_FD_ACTION_ACCEPT_PACKET, + HCLGE_FD_ACTION_DROP_PACKET, +}; + +struct hclge_fd_key_cfg { + u8 key_sel; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + 
u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u32 tuple_active; + u32 meta_data_active; +}; + +struct hclge_fd_cfg { + u8 fd_mode; + u8 fd_en; + u16 max_key_length; + u32 proto_support; + u32 rule_num[2]; /* rule entry number */ + u16 cnt_num[2]; /* rule hit counter number */ + struct hclge_fd_key_cfg key_cfg[2]; +}; + +struct hclge_fd_rule_tuples { + u8 src_mac[6]; + u8 dst_mac[6]; + u32 src_ip[4]; + u32 dst_ip[4]; + u16 src_port; + u16 dst_port; + u16 vlan_tag1; + u16 ether_proto; + u8 ip_tos; + u8 ip_proto; +}; + +struct hclge_fd_rule { + struct hlist_node rule_node; + struct hclge_fd_rule_tuples tuples; + struct hclge_fd_rule_tuples tuples_mask; + u32 unused_tuple; + u32 flow_type; + u8 action; + u16 vf_id; + u16 queue_id; + u16 location; +}; + +struct hclge_fd_ad_data { + u16 ad_id; + u8 drop_packet; + u8 forward_to_direct_queue; + u16 queue_id; + u8 use_counter; + u8 counter_id; + u8 use_next_stage; + u8 write_rule_id_to_bd; + u8 next_input_key; + u16 rule_id; +}; + +/* For each bit of TCAM entry, it uses a pair of 'x' and + * 'y' to indicate which value to match, like below: + * ---------------------------------- + * | bit x | bit y | search value | + * ---------------------------------- + * | 0 | 0 | always hit | + * ---------------------------------- + * | 1 | 0 | match '0' | + * ---------------------------------- + * | 0 | 1 | match '1' | + * ---------------------------------- + * | 1 | 1 | invalid | + * ---------------------------------- + * Then for input key(k) and mask(v), we can calculate the value by + * the formulae: + * x = (~k) & v + * y = (k ^ ~v) & k + */ +#define calc_x(x, k, v) ((x) = (~(k) & (v))) +#define calc_y(y, k, v) \ + do { \ + const typeof(k) _k_ = (k); \ + const typeof(v) _v_ = (v); \ + (y) = (_k_ ^ ~_v_) & (_k_); \ + } while (0) + #define HCLGE_VPORT_NUM 256 struct hclge_dev { struct pci_dev *pdev; @@ -547,12 +668,22 @@ struct hclge_dev { u32 pkt_buf_size; /* Total pf buf size for tx/rx */ u32 mps; /* Max packet size */ - enum hclge_mta_dmac_sel_type mta_mac_sel_type; - bool enable_mta; /* Multicast filter enable */ - struct hclge_vlan_type_cfg vlan_type_cfg; unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + + struct hclge_fd_cfg fd_cfg; + struct hlist_head fd_rule_list; + u16 hclge_fd_rule_num; + + u16 wanted_umv_size; + /* max available unicast mac vlan space */ + u16 max_umv_size; + /* private unicast mac vlan space, it's same for PF and its VFs */ + u16 priv_umv_size; + /* unicast mac vlan space shared by PF and its VFs */ + u16 share_umv_size; + struct mutex umv_mutex; /* protect share_umv_size */ }; /* VPort level vlan tag configuration for TX direction */ @@ -605,13 +736,12 @@ struct hclge_vport { struct hclge_tx_vtag_cfg txvlan_cfg; struct hclge_rx_vtag_cfg rxvlan_cfg; + u16 used_umv_num; + int vport_id; struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; - - bool accept_mta_mc; /* whether to accept mta filter multicast */ - unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -626,15 +756,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, int hclge_rm_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr); -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable); -int hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool update_filter); - struct hclge_vport 
*hclge_get_vport(struct hnae3_handle *handle); int hclge_bind_ring_with_vector(struct hclge_vport *vport, int vector_id, bool en, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f34851c91eb3..04462a347a94 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -233,43 +233,6 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, return 0; } -static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport, - u8 *msg, u8 idx, bool is_end) -{ -#define HCLGE_MTA_STATUS_MSG_SIZE 13 -#define HCLGE_MTA_STATUS_MSG_BITS \ - (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGE_MTA_STATUS_MSG_END_BITS \ - (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS) - unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)]; - u16 tbl_cnt; - u16 tbl_idx; - u8 msg_ofs; - u8 msg_bit; - - tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS : - HCLGE_MTA_STATUS_MSG_BITS; - - /* set msg field */ - msg_ofs = 0; - msg_bit = 0; - memset(status, 0, sizeof(status)); - for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) { - if (msg[msg_ofs] & BIT(msg_bit)) - set_bit(tbl_idx, status); - - msg_bit++; - if (msg_bit == BITS_PER_BYTE) { - msg_bit = 0; - msg_ofs++; - } - } - - return hclge_update_mta_status_common(vport, - status, idx * HCLGE_MTA_STATUS_MSG_BITS, - tbl_cnt, is_end); -} - static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) @@ -284,27 +247,6 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, status = hclge_add_mc_addr_common(vport, mac_addr); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { status = hclge_rm_mc_addr_common(vport, mac_addr); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) { - u8 func_id = vport->vport_id; - bool enable = mbx_req->msg[2]; - - status = hclge_cfg_func_mta_filter(hdev, func_id, enable); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) { - resp_data = hdev->mta_mac_sel_type; - resp_len = sizeof(u8); - gen_resp = true; - status = 0; - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) { - /* mta status update msg format - * msg[2.6 : 2.0] msg index - * msg[2.7] msg is end - * msg[15 : 3] mta status bits[103 : 0] - */ - bool is_end = (mbx_req->msg[2] & 0x80) ? 
true : false; - - status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3], - mbx_req->msg[2] & 0x7F, - is_end); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 398971a062f4..24b1f2a0c32a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -10,8 +10,6 @@ #define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ SUPPORTED_TP | \ - SUPPORTED_Pause | \ - SUPPORTED_Asym_Pause | \ PHY_10BT_FEATURES | \ PHY_100BT_FEATURES | \ PHY_1000BT_FEATURES) @@ -213,7 +211,7 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) } phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 5db70a1451c5..aa5cb9834d73 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -172,7 +172,7 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, u8 pfc_bitmap) { struct hclge_desc desc; - struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data; + struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false); @@ -188,11 +188,12 @@ static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, struct hclge_cfg_pause_param_cmd *pause_param; struct hclge_desc desc; - pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false); ether_addr_copy(pause_param->mac_addr, addr); + ether_addr_copy(pause_param->mac_addr_extra, addr); pause_param->pause_trans_gap = pause_trans_gap; pause_param->pause_trans_time = cpu_to_le16(pause_trans_time); @@ -207,7 +208,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) u8 trans_gap; int ret; - pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); @@ -297,7 +298,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, } static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, - u8 q_id, u16 qs_id) + u16 q_id, u16 qs_id) { struct hclge_nq_to_qs_link_cmd *map; struct hclge_desc desc; @@ -1279,10 +1280,15 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) return 0; } -void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) +int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) { u8 i, bit_map = 0; + for (i = 0; i < hdev->num_alloc_vport; i++) { + if (num_tc > hdev->vport[i].alloc_tqps) + return -EINVAL; + } + hdev->tm_info.num_tc = num_tc; for (i = 0; i < hdev->tm_info.num_tc; i++) @@ -1296,6 +1302,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) hdev->hw_tc_map = bit_map; hclge_tm_schd_info_init(hdev); + + return 0; } int hclge_tm_init_hw(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index dd4c194747c1..25eef13a3e14 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h 
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -106,6 +106,10 @@ struct hclge_cfg_pause_param_cmd { u8 pause_trans_gap; u8 rsvd; __le16 pause_trans_time; + u8 rsvd1[6]; + /* extra mac address to do double check for pause frame */ + u8 mac_addr_extra[ETH_ALEN]; + u16 rsvd2; }; struct hclge_pfc_stats_cmd { @@ -128,7 +132,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev); int hclge_pause_setup_hw(struct hclge_dev *hdev); int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); -void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); +int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_map_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index fb471fe2c494..0d3b445f6799 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -132,9 +132,9 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, reg_val |= HCLGEVF_NIC_CMQ_ENABLE; hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - break; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + return 0; case HCLGEVF_TYPE_CRQ: reg_val = (u32)ring->desc_dma_addr; hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); @@ -145,12 +145,12 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, reg_val |= HCLGEVF_NIC_CMQ_ENABLE; hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - break; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); + return 0; + default: + return -EINVAL; } - - return 0; } void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 19b32860309c..bc294b0c8b62 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -89,6 +89,7 @@ enum hclgevf_opcode_type { HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20, /* RSS cmd */ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02, HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07, HCLGEVF_OPC_RSS_TC_MODE = 0x0D08, /* Mailbox cmd */ @@ -148,7 +149,8 @@ struct hclgevf_query_res_cmd { __le16 rsv[7]; }; -#define HCLGEVF_RSS_HASH_KEY_OFFSET 4 +#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4 +#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4 #define HCLGEVF_RSS_HASH_KEY_NUM 16 struct hclgevf_rss_config_cmd { u8 hash_config; @@ -159,11 +161,11 @@ struct hclgevf_rss_config_cmd { struct hclgevf_rss_input_tuple_cmd { u8 ipv4_tcp_en; u8 ipv4_udp_en; - u8 ipv4_stcp_en; + u8 ipv4_sctp_en; u8 ipv4_fragment_en; u8 ipv6_tcp_en; u8 ipv6_udp_en; - u8 ipv6_stcp_en; + u8 ipv6_sctp_en; u8 ipv6_fragment_en; u8 rsv[16]; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 9c0091f2addf..e0a86a58342c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -31,16 +31,15 @@ static inline struct hclgevf_dev 
*hclgevf_ae_get_hdev( static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_queue *queue; struct hclgevf_desc desc; struct hclgevf_tqp *tqp; int status; int i; - for (i = 0; i < hdev->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_RX_STATUS, true); @@ -77,17 +76,16 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_tqp *tqp; u64 *buff = data; int i; - for (i = 0; i < hdev->num_tqps; i++) { - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; } for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; } @@ -96,29 +94,29 @@ static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; - return hdev->num_tqps * 2; + return kinfo->num_tqps * 2; } static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; u8 *buff = data; int i = 0; - for (i = 0; i < hdev->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } - for (i = 0; i < hdev->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } @@ -182,7 +180,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) return 0; } -static int hclge_get_queue_info(struct hclgevf_dev *hdev) +static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_RSS_INFO_LEN 8 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; @@ -299,6 +297,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) client = handle->client; + link_state = + test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; + if (link_state != hdev->hw.mac.link) { client->ops->link_status_change(handle, !!link_state); hdev->hw.mac.link = link_state; @@ -385,6 +386,47 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) return -EINVAL; } +static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, + const u8 hfunc, const u8 *key) +{ + struct hclgevf_rss_config_cmd *req; + struct hclgevf_desc desc; + int key_offset; + int key_size; + int ret; + + req = (struct hclgevf_rss_config_cmd *)desc.data; + + for (key_offset = 0; key_offset < 3; key_offset++) { + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); + req->hash_config |= + (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); + + if (key_offset == 2) + key_size = + HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGEVF_RSS_HASH_KEY_NUM; + + memcpy(req->hash_key, + key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure RSS config fail, status = %d\n", + ret); + return ret; + } + } + + return 0; +} + static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) { return HCLGEVF_RSS_KEY_SIZE; @@ -465,68 +507,40 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) return status; } -static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, - u8 *key) +static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_config_cmd *req; - int lkup_times = key ? 3 : 1; - struct hclgevf_desc desc; - int key_offset; - int key_size; - int status; - - req = (struct hclgevf_rss_config_cmd *)desc.data; - lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 
1 : 0); - - for (key_offset = 0; key_offset < lkup_times; key_offset++) { - hclgevf_cmd_setup_basic_desc(&desc, - HCLGEVF_OPC_RSS_GENERIC_CONFIG, - true); - req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i; - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "failed to get hardware RSS cfg, status = %d\n", - status); - return status; + if (handle->pdev->revision >= 0x21) { + /* Get hash algorithm */ + if (hfunc) { + switch (rss_cfg->hash_algo) { + case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGEVF_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } } - if (key_offset == 2) - key_size = - HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; - else - key_size = HCLGEVF_RSS_HASH_KEY_NUM; - + /* Get the RSS Key required by the user */ if (key) - memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, - req->hash_key, - key_size); - } - - if (hash) { - if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) - *hash = ETH_RSS_HASH_TOP; - else - *hash = ETH_RSS_HASH_UNKNOWN; + memcpy(key, rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); } - return 0; -} - -static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; - if (indir) for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) indir[i] = rss_cfg->rss_indirection_tbl[i]; - return hclgevf_get_rss_hw_cfg(handle, hfunc, key); + return 0; } static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, @@ -534,7 +548,36 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; + int ret, i; + + if (handle->pdev->revision >= 0x21) { + /* Set the RSS Hash Key if specififed by the user */ + if (key) { + switch (hfunc) { + case ETH_RSS_HASH_TOP: + rss_cfg->hash_algo = + HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; + break; + case ETH_RSS_HASH_XOR: + rss_cfg->hash_algo = + HCLGEVF_RSS_HASH_ALGO_SIMPLE; + break; + case ETH_RSS_HASH_NO_CHANGE: + break; + default: + return -EINVAL; + } + + ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, + key); + if (ret) + return ret; + + /* Update the shadow RSS key with user specified qids */ + memcpy(rss_cfg->rss_hash_key, key, + HCLGEVF_RSS_KEY_SIZE); + } + } /* update the shadow RSS table with user specified qids */ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) @@ -544,6 +587,193 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, return hclgevf_set_rss_indir_table(hdev); } +static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) +{ + u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGEVF_S_PORT_BIT : 0; + + if (nfc->data & RXH_L4_B_2_3) + hash_sets |= HCLGEVF_D_PORT_BIT; + else + hash_sets &= ~HCLGEVF_D_PORT_BIT; + + if (nfc->data & RXH_IP_SRC) + hash_sets |= HCLGEVF_S_IP_BIT; + else + hash_sets &= ~HCLGEVF_S_IP_BIT; + + if (nfc->data & RXH_IP_DST) + hash_sets |= HCLGEVF_D_IP_BIT; + else + hash_sets &= ~HCLGEVF_D_IP_BIT; + + if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) + hash_sets |= HCLGEVF_V_TAG_BIT; + + return hash_sets; +} + +static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + struct hclgevf_rss_input_tuple_cmd *req; + struct hclgevf_desc desc; + u8 tuple_sets; + int ret; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + tuple_sets = hclgevf_get_rss_hash_bits(nfc); + switch (nfc->flow_type) { + case TCP_V4_FLOW: + req->ipv4_tcp_en = tuple_sets; + break; + case TCP_V6_FLOW: + req->ipv6_tcp_en = tuple_sets; + break; + case UDP_V4_FLOW: + req->ipv4_udp_en = tuple_sets; + break; + case UDP_V6_FLOW: + req->ipv6_udp_en = tuple_sets; + break; + case SCTP_V4_FLOW: + req->ipv4_sctp_en = tuple_sets; + break; + case SCTP_V6_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + + req->ipv6_sctp_en = tuple_sets; + break; + case IPV4_FLOW: + req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + case IPV6_FLOW: + req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + default: + return -EINVAL; + } + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set rss tuple fail, status = %d\n", ret); + return ret; + } + + rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; + rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; + rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; + rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; + rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; + rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; + rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; + rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + return 0; +} + +static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 tuple_sets; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + nfc->data = 0; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + break; + case UDP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; + break; + case TCP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + break; + case 
UDP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; + break; + case SCTP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + break; + case SCTP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + break; + case IPV4_FLOW: + case IPV6_FLOW: + tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; + break; + default: + return -EINVAL; + } + + if (!tuple_sets) + return 0; + + if (tuple_sets & HCLGEVF_D_PORT_BIT) + nfc->data |= RXH_L4_B_2_3; + if (tuple_sets & HCLGEVF_S_PORT_BIT) + nfc->data |= RXH_L4_B_0_1; + if (tuple_sets & HCLGEVF_D_IP_BIT) + nfc->data |= RXH_IP_DST; + if (tuple_sets & HCLGEVF_S_IP_BIT) + nfc->data |= RXH_IP_SRC; + + return 0; +} + +static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, + struct hclgevf_rss_cfg *rss_cfg) +{ + struct hclgevf_rss_input_tuple_cmd *req; + struct hclgevf_desc desc; + int ret; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Configure rss input fail, status = %d\n", ret); + return ret; +} + static int hclgevf_get_tc_size(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -735,138 +965,16 @@ static int hclgevf_get_queue_id(struct hnae3_queue *queue) static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_queue *queue; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_tqp *tqp; int i; - for (i = 0; i < hdev->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); } } -static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) -{ - u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; - int ret; - - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, - NULL, 0, true, &resp_msg, sizeof(u8)); - - if (ret) { - dev_err(&hdev->pdev->dev, - "Read mta type fail, ret=%d.\n", ret); - return ret; - } - - if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) { - dev_err(&hdev->pdev->dev, - "Read mta type invalid, resp=%d.\n", resp_msg); - return -EINVAL; - } - - hdev->mta_mac_sel_type = resp_msg; - - return 0; -} - -static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev, - const u8 *addr) -{ - u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type; - u16 high_val = addr[1] | (addr[0] << 8); - - return (high_val >> rsh) & 0xfff; -} - -static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev, - unsigned long *status) -{ -#define HCLGEVF_MTA_STATUS_MSG_SIZE 13 -#define HCLGEVF_MTA_STATUS_MSG_BITS \ - (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGEVF_MTA_STATUS_MSG_END_BITS \ - (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS) - u16 tbl_cnt; - u16 tbl_idx; - u8 
msg_cnt; - u8 msg_idx; - int ret; - - msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE, - HCLGEVF_MTA_STATUS_MSG_BITS); - tbl_idx = 0; - msg_idx = 0; - while (msg_cnt--) { - u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1]; - u8 *p = &msg[1]; - u8 msg_ofs; - u8 msg_bit; - - memset(msg, 0, sizeof(msg)); - - /* set index field */ - msg[0] = 0x7F & msg_idx; - - /* set end flag field */ - if (msg_cnt == 0) { - msg[0] |= 0x80; - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS; - } else { - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS; - } - - /* set status field */ - msg_ofs = 0; - msg_bit = 0; - while (tbl_cnt--) { - if (test_bit(tbl_idx, status)) - p[msg_ofs] |= BIT(msg_bit); - - tbl_idx++; - - msg_bit++; - if (msg_bit == BITS_PER_BYTE) { - msg_bit = 0; - msg_ofs++; - } - } - - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, - msg, sizeof(msg), false, NULL, 0); - if (ret) - break; - - msg_idx++; - } - - return ret; -} - -static int hclgevf_update_mta_status(struct hnae3_handle *handle) -{ - unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)]; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct net_device *netdev = hdev->nic.kinfo.netdev; - struct netdev_hw_addr *ha; - u16 tbl_idx; - - /* clear status */ - memset(mta_status, 0, sizeof(mta_status)); - - /* update status from mc addr list */ - netdev_for_each_mc_addr(ha, netdev) { - tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr); - set_bit(tbl_idx, mta_status); - } - - return hclgevf_do_update_mta_status(hdev, mta_status); -} - static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -1106,7 +1214,8 @@ static int hclgevf_do_reset(struct hclgevf_dev *hdev) return status; } -static void hclgevf_reset_event(struct hnae3_handle *handle) +static void hclgevf_reset_event(struct pci_dev *pdev, + struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -1341,8 +1450,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; + hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE; + /* get queue configuration from PF */ - ret = hclge_get_queue_info(hdev); + ret = hclgevf_get_queue_info(hdev); if (ret) return ret; /* get tc configuration from PF */ @@ -1395,6 +1506,39 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) rss_cfg->rss_size = hdev->rss_size_max; + if (hdev->pdev->revision >= 0x21) { + rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; + netdev_rss_key_fill(rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); + + ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, + rss_cfg->rss_hash_key); + if (ret) + return ret; + + rss_cfg->rss_tuple_sets.ipv4_tcp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv4_udp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv4_sctp_en = + HCLGEVF_RSS_INPUT_TUPLE_SCTP; + rss_cfg->rss_tuple_sets.ipv4_fragment_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv6_tcp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv6_udp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv6_sctp_en = + HCLGEVF_RSS_INPUT_TUPLE_SCTP; + rss_cfg->rss_tuple_sets.ipv6_fragment_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + + ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); + if (ret) + return ret; + + } + /* Initialize RSS indirect table for each vport */ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; @@ -1417,12 +1561,13 @@ 
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) static int hclgevf_ae_start(struct hnae3_handle *handle) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int i, queue_id; - for (i = 0; i < handle->kinfo.num_tqps; i++) { + for (i = 0; i < kinfo->num_tqps; i++) { /* ring enable */ - queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); + queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); if (queue_id < 0) { dev_warn(&hdev->pdev->dev, "Get invalid queue id, ignore it\n"); @@ -1445,12 +1590,15 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) static void hclgevf_ae_stop(struct hnae3_handle *handle) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int i, queue_id; - for (i = 0; i < hdev->num_tqps; i++) { + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + + for (i = 0; i < kinfo->num_tqps; i++) { /* Ring disable */ - queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); + queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); if (queue_id < 0) { dev_warn(&hdev->pdev->dev, "Get invalid queue id, ignore it\n"); @@ -1619,17 +1767,22 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, ret = client->ops->init_instance(&hdev->nic); if (ret) - return ret; + goto clear_nic; + + hnae3_set_client_init_flag(client, ae_dev, 1); if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; ret = hclgevf_init_roce_base_info(hdev); if (ret) - return ret; + goto clear_roce; ret = rc->ops->init_instance(&hdev->roce); if (ret) - return ret; + goto clear_roce; + + hnae3_set_client_init_flag(hdev->roce_client, ae_dev, + 1); } break; case HNAE3_CLIENT_UNIC: @@ -1638,7 +1791,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, ret = client->ops->init_instance(&hdev->nic); if (ret) - return ret; + goto clear_nic; + + hnae3_set_client_init_flag(client, ae_dev, 1); break; case HNAE3_CLIENT_ROCE: if (hnae3_dev_roce_supported(hdev)) { @@ -1649,15 +1804,29 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, if (hdev->roce_client && hdev->nic_client) { ret = hclgevf_init_roce_base_info(hdev); if (ret) - return ret; + goto clear_roce; ret = client->ops->init_instance(&hdev->roce); if (ret) - return ret; + goto clear_roce; } + + hnae3_set_client_init_flag(client, ae_dev, 1); + break; + default: + return -EINVAL; } return 0; + +clear_nic: + hdev->nic_client = NULL; + hdev->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + hdev->roce.client = NULL; + return ret; } static void hclgevf_uninit_client_instance(struct hnae3_client *client, @@ -1666,13 +1835,19 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client, struct hclgevf_dev *hdev = ae_dev->priv; /* un-init roce, if it exists */ - if (hdev->roce_client) + if (hdev->roce_client) { hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); + hdev->roce_client = NULL; + hdev->roce.client = NULL; + } /* un-init nic/unic, if this was not called by roce client */ - if ((client->ops->uninit_instance) && - (client->type != HNAE3_CLIENT_ROCE)) + if (client->ops->uninit_instance && hdev->nic_client && + client->type != HNAE3_CLIENT_ROCE) { client->ops->uninit_instance(&hdev->nic, 0); + hdev->nic_client = NULL; + hdev->nic.client = NULL; + } } static int hclgevf_pci_init(struct hclgevf_dev *hdev) @@ -1839,14 +2014,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* 
Initialize mta type for this VF */ - ret = hclgevf_cfg_func_mta_type(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed(%d) to initialize MTA type\n", ret); - goto err_config; - } - /* Initialize RSS for this VF */ ret = hclgevf_rss_init_hw(hdev); if (ret) { @@ -1943,11 +2110,11 @@ static void hclgevf_get_channels(struct hnae3_handle *handle, } static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, - u16 *free_tqps, u16 *max_rss_size) + u16 *alloc_tqps, u16 *max_rss_size) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - *free_tqps = 0; + *alloc_tqps = hdev->num_tqps; *max_rss_size = hdev->rss_size_max; } @@ -1979,6 +2146,14 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, hdev->hw.mac.duplex = duplex; } +static void hclgevf_get_media_type(struct hnae3_handle *handle, + u8 *media_type) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + if (media_type) + *media_type = hdev->hw.mac.media_type; +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, @@ -1998,7 +2173,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { .rm_uc_addr = hclgevf_rm_uc_addr, .add_mc_addr = hclgevf_add_mc_addr, .rm_mc_addr = hclgevf_rm_mc_addr, - .update_mta_status = hclgevf_update_mta_status, .get_stats = hclgevf_get_stats, .update_stats = hclgevf_update_stats, .get_strings = hclgevf_get_strings, @@ -2007,6 +2181,8 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_rss_indir_size = hclgevf_get_rss_indir_size, .get_rss = hclgevf_get_rss, .set_rss = hclgevf_set_rss, + .get_rss_tuple = hclgevf_get_rss_tuple, + .set_rss_tuple = hclgevf_set_rss_tuple, .get_tc_size = hclgevf_get_tc_size, .get_fw_version = hclgevf_get_fw_version, .set_vlan_filter = hclgevf_set_vlan_filter, @@ -2016,6 +2192,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, .get_status = hclgevf_get_status, .get_ksettings_an_result = hclgevf_get_ksettings_an_result, + .get_media_type = hclgevf_get_media_type, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index b23ba171473c..aed241e8ffab 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -46,9 +46,13 @@ #define HCLGEVF_RSS_HASH_ALGO_MASK 0xf #define HCLGEVF_RSS_CFG_TBL_NUM \ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE) - -#define HCLGEVF_MTA_TBL_SIZE 4096 -#define HCLGEVF_MTA_TYPE_SEL_MAX 4 +#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) +#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0) +#define HCLGEVF_D_PORT_BIT BIT(0) +#define HCLGEVF_S_PORT_BIT BIT(1) +#define HCLGEVF_D_IP_BIT BIT(2) +#define HCLGEVF_S_IP_BIT BIT(3) +#define HCLGEVF_V_TAG_BIT BIT(4) /* states of hclgevf device & tasks */ enum hclgevf_states { @@ -66,6 +70,7 @@ enum hclgevf_states { #define HCLGEVF_MPF_ENBALE 1 struct hclgevf_mac { + u8 media_type; u8 mac_addr[ETH_ALEN]; int link; u8 duplex; @@ -108,12 +113,24 @@ struct hclgevf_cfg { u32 numa_node_map; }; +struct hclgevf_rss_tuple_cfg { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; +}; + struct hclgevf_rss_cfg { u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */ u32 hash_algo; u32 rss_size; u8 hw_tc_map; u8 
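A quick decode of the two tuple masks introduced above, stated in terms of the new bit definitions (a reference sketch, not code from the patch):

	/* HCLGEVF_RSS_INPUT_TUPLE_OTHER == GENMASK(3, 0): hash on the IP pair + L4 ports */
	u8 other = HCLGEVF_S_PORT_BIT | HCLGEVF_D_PORT_BIT |
		   HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;

	/* HCLGEVF_RSS_INPUT_TUPLE_SCTP == GENMASK(4, 0): the SCTP default additionally
	 * includes the field selected by HCLGEVF_V_TAG_BIT (presumably the SCTP
	 * verification tag), matching what hclgevf_get_rss_hash_bits() sets for
	 * SCTP_V4_FLOW/SCTP_V6_FLOW.
	 */
	u8 sctp = other | HCLGEVF_V_TAG_BIT;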
rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */ + struct hclgevf_rss_tuple_cfg rss_tuple_sets; }; struct hclgevf_misc_vector { @@ -156,8 +173,6 @@ struct hclgevf_dev { u16 *vector_status; int *vector_irq; - bool accept_mta_mc; /* whether to accept mta filter multicast */ - u8 mta_mac_sel_type; bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index 0f5563f3b779..097b5502603f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -58,6 +58,8 @@ enum hinic_port_cmd { HINIC_PORT_CMD_GET_GLOBAL_QPN = 102, + HINIC_PORT_CMD_SET_TSO = 112, + HINIC_PORT_CMD_GET_CAP = 170, }; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index cb239627770f..967c993d5303 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -70,8 +70,6 @@ #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask) -#define TX_MAX_MSS_DEFAULT 0x3E00 - enum sq_wqe_type { SQ_NORMAL_WQE = 0, }; @@ -494,33 +492,16 @@ static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx, HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | HINIC_SQ_CTRL_SET(ctrl_size, LEN); - ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT, - QUEUE_INFO_MSS); + ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT, + QUEUE_INFO_MSS) | + HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC); } static void sq_prepare_task(struct hinic_sq_task *task) { - task->pkt_info0 = - HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) | - HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) | - HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN, - INNER_L3TYPE) | - HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE, - VLAN_OFFLOAD) | - HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG); - - task->pkt_info1 = - HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) | - HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) | - HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN); - - task->pkt_info2 = - HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) | - HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) | - HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN, - TUNNEL_L4TYPE) | - HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN, - OUTER_L3TYPE); + task->pkt_info0 = 0; + task->pkt_info1 = 0; + task->pkt_info2 = 0; task->ufo_v6_identify = 0; @@ -529,6 +510,86 @@ static void sq_prepare_task(struct hinic_sq_task *task) task->zero_pad = 0; } +void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len) +{ + task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN); +} + +void hinic_task_set_outter_l3(struct hinic_sq_task *task, + enum hinic_l3_offload_type l3_type, + u32 network_len) +{ + task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) | + HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN); +} + +void hinic_task_set_inner_l3(struct hinic_sq_task *task, + enum hinic_l3_offload_type l3_type, + u32 network_len) +{ + task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE); + task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN); +} + +void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, + enum hinic_l4_offload_type l4_type, + u32 tunnel_len) +{ + task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) | + 
HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN); +} + +void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info, + enum hinic_l4_offload_type l4_offload, + u32 l4_len, u32 offset) +{ + u32 tcp_udp_cs = 0, sctp = 0; + u32 mss = HINIC_MSS_DEFAULT; + + if (l4_offload == TCP_OFFLOAD_ENABLE || + l4_offload == UDP_OFFLOAD_ENABLE) + tcp_udp_cs = 1; + else if (l4_offload == SCTP_OFFLOAD_ENABLE) + sctp = 1; + + task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD); + task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); + + *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) | + HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) | + HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP); + + *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS); + *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS); +} + +void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info, + enum hinic_l4_offload_type l4_offload, + u32 l4_len, u32 offset, u32 ip_ident, u32 mss) +{ + u32 tso = 0, ufo = 0; + + if (l4_offload == TCP_OFFLOAD_ENABLE) + tso = 1; + else if (l4_offload == UDP_OFFLOAD_ENABLE) + ufo = 1; + + task->ufo_v6_identify = ip_ident; + + task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD); + task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG); + task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); + + *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) | + HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) | + HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) | + HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS); + + /* set MSS value */ + *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS); + *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS); +} + /** * hinic_sq_prepare_wqe - prepare wqe before insert to the queue * @sq: send queue @@ -613,6 +674,16 @@ struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, } /** + * hinic_sq_return_wqe - return the wqe to the sq + * @sq: send queue + * @wqe_size: the size of the wqe + **/ +void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size) +{ + hinic_return_wqe(sq->wq, wqe_size); +} + +/** * hinic_sq_write_wqe - write the wqe to the sq * @sq: send queue * @prod_idx: pi of the wqe diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h index 6c84f83ec283..a0dc63a4bfc7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -149,6 +149,31 @@ int hinic_get_sq_free_wqebbs(struct hinic_sq *sq); int hinic_get_rq_free_wqebbs(struct hinic_rq *rq); +void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len); + +void hinic_task_set_outter_l3(struct hinic_sq_task *task, + enum hinic_l3_offload_type l3_type, + u32 network_len); + +void hinic_task_set_inner_l3(struct hinic_sq_task *task, + enum hinic_l3_offload_type l3_type, + u32 network_len); + +void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, + enum hinic_l4_offload_type l4_type, + u32 tunnel_len); + +void hinic_set_cs_inner_l4(struct hinic_sq_task *task, + u32 *queue_info, + enum hinic_l4_offload_type l4_offload, + u32 l4_len, u32 offset); + +void hinic_set_tso_inner_l4(struct hinic_sq_task *task, + u32 *queue_info, + enum hinic_l4_offload_type l4_offload, + u32 l4_len, + u32 offset, u32 ip_ident, u32 mss); + void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx, struct hinic_sq_wqe *wqe, struct hinic_sge *sges, int nr_sges); @@ -159,6 +184,8 @@ void hinic_sq_write_db(struct 
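The per-field helpers above replace the old monolithic sq_prepare_task() initialisation, so a caller ORs in only the fields it needs. A minimal sketch (an assumed caller, mirroring what the hinic_tx.c hunks later in this patch do) for a plain, non-tunnelled TCP/IPv4 checksum offload:

	/* task and queue_info belong to the SQ WQE being prepared */
	hinic_task_set_l2hdr(task, skb_network_offset(skb));
	hinic_task_set_inner_l3(task, IPV4_PKT_NO_CHKSUM_OFFLOAD,
				skb_network_header_len(skb));
	hinic_set_cs_inner_l4(task, &queue_info, TCP_OFFLOAD_ENABLE,
			      tcp_hdrlen(skb),
			      tcp_hdrlen(skb) + skb_transport_offset(skb));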
hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, unsigned int wqe_size, u16 *prod_idx); +void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size); + void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, struct hinic_sq_wqe *wqe, struct sk_buff *skb, unsigned int wqe_size); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index 3e3181c089bd..f92f1bf3901a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -775,6 +775,20 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, } /** + * hinic_return_wqe - return the wqe when transmit failed + * @wq: wq to return wqe + * @wqe_size: wqe size + **/ +void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size) +{ + int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + + atomic_sub(num_wqebbs, &wq->prod_idx); + + atomic_add(num_wqebbs, &wq->delta); +} + +/** * hinic_put_wqe - return the wqe place to use for a new wqe * @wq: wq to return wqe * @wqe_size: wqe size diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h index 9c030a0f035e..9b66545ba563 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h @@ -104,6 +104,8 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq); struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, u16 *prod_idx); +void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size); + void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size); struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h index bc73485483c5..9754d6ed5f4a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h @@ -62,19 +62,33 @@ (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \ & HINIC_CMDQ_WQE_HEADER_##member##_MASK) -#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 -#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16 -#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22 -#define HINIC_SQ_CTRL_LEN_SHIFT 29 - -#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF -#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F -#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1 -#define HINIC_SQ_CTRL_LEN_MASK 0x3 - -#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 - -#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF +#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16 +#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22 +#define HINIC_SQ_CTRL_LEN_SHIFT 29 + +#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF +#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F +#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1 +#define HINIC_SQ_CTRL_LEN_MASK 0x3 + +#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2 +#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10 +#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11 +#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12 +#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 +#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27 +#define HINIC_SQ_CTRL_QUEUE_INFO_UC_SHIFT 28 +#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29 + +#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFF +#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1 +#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1 +#define 
HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1 +#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF +#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1 +#define HINIC_SQ_CTRL_QUEUE_INFO_UC_MASK 0x1 +#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7 #define HINIC_SQ_CTRL_SET(val, member) \ (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \ @@ -84,6 +98,10 @@ (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \ & HINIC_SQ_CTRL_##member##_MASK) +#define HINIC_SQ_CTRL_CLEAR(val, member) \ + ((u32)(val) & (~(HINIC_SQ_CTRL_##member##_MASK \ + << HINIC_SQ_CTRL_##member##_SHIFT))) + #define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0 #define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8 #define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10 @@ -108,28 +126,28 @@ /* 8 bits reserved */ #define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8 -#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT 16 -#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT 24 +#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16 +#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24 /* 8 bits reserved */ #define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF -#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK 0xFF -#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK 0xFF +#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFF +#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFF #define HINIC_SQ_TASK_INFO1_SET(val, member) \ (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \ HINIC_SQ_TASK_INFO1_##member##_SHIFT) -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0 -#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT 12 -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19 +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0 +#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8 +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16 /* 1 bit reserved */ -#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 22 +#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24 /* 8 bits reserved */ -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK 0xFFF -#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK 0x7F -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x3 +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFF +#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFF +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7 /* 1 bit reserved */ #define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3 /* 8 bits reserved */ @@ -187,12 +205,15 @@ sizeof(struct hinic_sq_task) + \ (nr_sges) * sizeof(struct hinic_sq_bufdesc)) -#define HINIC_SCMD_DATA_LEN 16 +#define HINIC_SCMD_DATA_LEN 16 + +#define HINIC_MAX_SQ_BUFDESCS 17 -#define HINIC_MAX_SQ_BUFDESCS 17 +#define HINIC_SQ_WQE_MAX_SIZE 320 +#define HINIC_RQ_WQE_SIZE 32 -#define HINIC_SQ_WQE_MAX_SIZE 320 -#define HINIC_RQ_WQE_SIZE 32 +#define HINIC_MSS_DEFAULT 0x3E00 +#define HINIC_MSS_MIN 0x50 enum hinic_l4offload_type { HINIC_L4_OFF_DISABLE = 0, @@ -211,6 +232,26 @@ enum hinic_pkt_parsed { HINIC_PKT_PARSED = 1, }; +enum hinic_l3_offload_type { + L3TYPE_UNKNOWN = 0, + IPV6_PKT = 1, + IPV4_PKT_NO_CHKSUM_OFFLOAD = 2, + IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3, +}; + +enum hinic_l4_offload_type { + OFFLOAD_DISABLE = 0, + TCP_OFFLOAD_ENABLE = 1, + SCTP_OFFLOAD_ENABLE = 2, + UDP_OFFLOAD_ENABLE = 3, +}; + +enum hinic_l4_tunnel_type { + NOT_TUNNEL, + TUNNEL_UDP_NO_CSUM, + TUNNEL_UDP_CSUM, +}; + enum hinic_outer_l3type { HINIC_OUTER_L3TYPE_UNKNOWN = 0, HINIC_OUTER_L3TYPE_IPV6 = 1, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 4a8f82938ed5..fdf2bdb6b0d0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -805,7 +805,8 @@ 
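The new HINIC_SQ_CTRL_CLEAR() is the counterpart of HINIC_SQ_CTRL_SET() for read-modify-write of a single field inside the packed queue_info word. A minimal sketch, using only the macros and constants defined above, of how the MSS bits are replaced without disturbing the neighbouring flags (this mirrors hinic_set_tso_inner_l4()):

	u32 queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT, QUEUE_INFO_MSS) |
			 HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC);

	/* drop the old MSS bits, then insert the new value */
	queue_info = HINIC_SQ_CTRL_CLEAR(queue_info, QUEUE_INFO_MSS);
	queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);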
static const struct net_device_ops hinic_netdev_ops = { static void netdev_features_init(struct net_device *netdev) { - netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; + netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; netdev->vlan_features = netdev->hw_features; @@ -863,6 +864,20 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, *out_size = sizeof(*ret_link_status); } +static int set_features(struct hinic_dev *nic_dev, + netdev_features_t pre_features, + netdev_features_t features, bool force_change) +{ + netdev_features_t changed = force_change ? ~0 : pre_features ^ features; + int err = 0; + + if (changed & NETIF_F_TSO) + err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ? + HINIC_TSO_ENABLE : HINIC_TSO_DISABLE); + + return err; +} + /** * nic_dev_init - Initialize the NIC device * @pdev: the NIC pci device @@ -963,7 +978,12 @@ static int nic_dev_init(struct pci_dev *pdev) hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, nic_dev, link_status_event_handler); + err = set_features(nic_dev, 0, nic_dev->netdev->features, true); + if (err) + goto err_set_features; + SET_NETDEV_DEV(netdev, &pdev->dev); + err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); @@ -973,6 +993,7 @@ static int nic_dev_init(struct pci_dev *pdev) return 0; err_reg_netdev: +err_set_features: hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS); cancel_work_sync(&rx_mode_work->work); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 4d4e3f05fb5f..7575a7d3bd9f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -377,3 +377,35 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev, return 0; } + +/** + * hinic_port_set_tso - set port tso configuration + * @nic_dev: nic device + * @state: the tso state to set + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct hinic_tso_config tso_cfg = {0}; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); + tso_cfg.tso_en = state; + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_TSO, + &tso_cfg, sizeof(tso_cfg), + &tso_cfg, &out_size); + if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) { + dev_err(&pdev->dev, + "Failed to set port tso, ret = %d\n", + tso_cfg.status); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 9404365195dd..f6e3220fe28f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -72,6 +72,11 @@ enum hinic_speed { HINIC_SPEED_UNKNOWN = 0xFF, }; +enum hinic_tso_state { + HINIC_TSO_DISABLE = 0, + HINIC_TSO_ENABLE = 1, +}; + struct hinic_port_mac_cmd { u8 status; u8 version; @@ -167,6 +172,17 @@ struct hinic_port_cap { u8 rsvd2[3]; }; +struct hinic_tso_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 tso_en; + u8 resv2[3]; +}; + int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); @@ -195,4 +211,6 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev, int 
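Note that advertising NETIF_F_TSO in hw_features is not sufficient on this hardware; the firmware must be told as well, which is what the new HINIC_PORT_CMD_SET_TSO mailbox command and hinic_port_set_tso() are for. The patch only forces the state into sync once at probe time via set_features(..., true). A runtime .ndo_set_features hook is not added here, but a hypothetical one could reuse the same helper (a sketch under that assumption, not part of this patch):

	static int hinic_set_features(struct net_device *netdev,
				      netdev_features_t features)
	{
		struct hinic_dev *nic_dev = netdev_priv(netdev);

		/* pre_features = current state, force_change = false */
		return set_features(nic_dev, nic_dev->netdev->features,
				    features, false);
	}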
hinic_port_get_cap(struct hinic_dev *nic_dev, struct hinic_port_cap *port_cap); +int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index c5fca0356c9c..11e73e67358d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -26,6 +26,13 @@ #include <linux/skbuff.h> #include <linux/smp.h> #include <asm/byteorder.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/ipv6.h> +#include <net/ipv6.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> #include "hinic_common.h" #include "hinic_hw_if.h" @@ -45,9 +52,31 @@ #define CI_UPDATE_NO_PENDING 0 #define CI_UPDATE_NO_COALESC 0 -#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) +#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) -#define MIN_SKB_LEN 64 +#define MIN_SKB_LEN 17 + +#define MAX_PAYLOAD_OFFSET 221 +#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) + +union hinic_l3 { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union hinic_l4 { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +enum hinic_offload_type { + TX_OFFLOAD_TSO = BIT(0), + TX_OFFLOAD_CSUM = BIT(1), + TX_OFFLOAD_VLAN = BIT(2), + TX_OFFLOAD_INVALID = BIT(3), +}; /** * hinic_txq_clean_stats - Clean the statistics of specific queue @@ -175,18 +204,263 @@ static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, DMA_TO_DEVICE); } +static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip, + union hinic_l4 *l4, + enum hinic_offload_type offload_type, + enum hinic_l3_offload_type *l3_type, + u8 *l4_proto) +{ + u8 *exthdr; + + if (ip->v4->version == 4) { + *l3_type = (offload_type == TX_OFFLOAD_CSUM) ? + IPV4_PKT_NO_CHKSUM_OFFLOAD : + IPV4_PKT_WITH_CHKSUM_OFFLOAD; + *l4_proto = ip->v4->protocol; + } else if (ip->v4->version == 6) { + *l3_type = IPV6_PKT; + exthdr = ip->hdr + sizeof(*ip->v6); + *l4_proto = ip->v6->nexthdr; + if (exthdr != l4->hdr) { + int start = exthdr - skb->data; + __be16 frag_off; + + ipv6_skip_exthdr(skb, start, l4_proto, &frag_off); + } + } else { + *l3_type = L3TYPE_UNKNOWN; + *l4_proto = 0; + } +} + +static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4, + enum hinic_offload_type offload_type, u8 l4_proto, + enum hinic_l4_offload_type *l4_offload, + u32 *l4_len, u32 *offset) +{ + *l4_offload = OFFLOAD_DISABLE; + *offset = 0; + *l4_len = 0; + + switch (l4_proto) { + case IPPROTO_TCP: + *l4_offload = TCP_OFFLOAD_ENABLE; + /* doff in unit of 4B */ + *l4_len = l4->tcp->doff * 4; + *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb); + break; + + case IPPROTO_UDP: + *l4_offload = UDP_OFFLOAD_ENABLE; + *l4_len = sizeof(struct udphdr); + *offset = TRANSPORT_OFFSET(l4->hdr, skb); + break; + + case IPPROTO_SCTP: + /* only csum offload support sctp */ + if (offload_type != TX_OFFLOAD_CSUM) + break; + + *l4_offload = SCTP_OFFLOAD_ENABLE; + *l4_len = sizeof(struct sctphdr); + *offset = TRANSPORT_OFFSET(l4->hdr, skb); + break; + + default: + break; + } +} + +static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto) +{ + return (ip->v4->version == 4) ? 
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); +} + +static int offload_tso(struct hinic_sq_task *task, u32 *queue_info, + struct sk_buff *skb) +{ + u32 offset, l4_len, ip_identify, network_hdr_len; + enum hinic_l3_offload_type l3_offload; + enum hinic_l4_offload_type l4_offload; + union hinic_l3 ip; + union hinic_l4 l4; + u8 l4_proto; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_cow_head(skb, 0) < 0) + return -EPROTONOSUPPORT; + + if (skb->encapsulation) { + u32 gso_type = skb_shinfo(skb)->gso_type; + u32 tunnel_type = 0; + u32 l4_tunnel_len; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + network_hdr_len = skb_inner_network_header_len(skb); + + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD; + } else if (ip.v4->version == 6) { + l3_offload = IPV6_PKT; + } else { + l3_offload = 0; + } + + hinic_task_set_outter_l3(task, l3_offload, + skb_network_header_len(skb)); + + if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); + tunnel_type = TUNNEL_UDP_CSUM; + } else if (gso_type & SKB_GSO_UDP_TUNNEL) { + tunnel_type = TUNNEL_UDP_NO_CSUM; + } + + l4_tunnel_len = skb_inner_network_offset(skb) - + skb_transport_offset(skb); + hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len); + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } else { + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + network_hdr_len = skb_network_header_len(skb); + } + + /* initialize inner IP header fields */ + if (ip.v4->version == 4) + ip.v4->tot_len = 0; + else + ip.v6->payload_len = 0; + + get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload, + &l4_proto); + + hinic_task_set_inner_l3(task, l3_offload, network_hdr_len); + + ip_identify = 0; + if (l4_proto == IPPROTO_TCP) + l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); + + get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload, + &l4_len, &offset); + + hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset, + ip_identify, skb_shinfo(skb)->gso_size); + + return 1; +} + +static int offload_csum(struct hinic_sq_task *task, u32 *queue_info, + struct sk_buff *skb) +{ + enum hinic_l4_offload_type l4_offload; + u32 offset, l4_len, network_hdr_len; + enum hinic_l3_offload_type l3_type; + union hinic_l3 ip; + union hinic_l4 l4; + u8 l4_proto; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (skb->encapsulation) { + u32 l4_tunnel_len; + + ip.hdr = skb_network_header(skb); + + if (ip.v4->version == 4) + l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD; + else if (ip.v4->version == 6) + l3_type = IPV6_PKT; + else + l3_type = L3TYPE_UNKNOWN; + + hinic_task_set_outter_l3(task, l3_type, + skb_network_header_len(skb)); + + l4_tunnel_len = skb_inner_network_offset(skb) - + skb_transport_offset(skb); + + hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM, + l4_tunnel_len); + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + network_hdr_len = skb_inner_network_header_len(skb); + } else { + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + network_hdr_len = skb_network_header_len(skb); + } + + get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type, + &l4_proto); + + hinic_task_set_inner_l3(task, l3_type, network_hdr_len); + + get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload, + &l4_len, &offset); + + 
hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset); + + return 1; +} + +static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task, + u32 *queue_info) +{ + enum hinic_offload_type offload = 0; + int enabled; + + enabled = offload_tso(task, queue_info, skb); + if (enabled > 0) { + offload |= TX_OFFLOAD_TSO; + } else if (enabled == 0) { + enabled = offload_csum(task, queue_info, skb); + if (enabled) + offload |= TX_OFFLOAD_CSUM; + } else { + return -EPROTONOSUPPORT; + } + + if (offload) + hinic_task_set_l2hdr(task, skb_network_offset(skb)); + + /* payload offset should not more than 221 */ + if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) > + MAX_PAYLOAD_OFFSET) { + return -EPROTONOSUPPORT; + } + + /* mss should not less than 80 */ + if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) { + *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS); + *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS); + } + + return 0; +} + netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); + u16 prod_idx, q_id = skb->queue_mapping; struct netdev_queue *netdev_txq; int nr_sges, err = NETDEV_TX_OK; struct hinic_sq_wqe *sq_wqe; unsigned int wqe_size; struct hinic_txq *txq; struct hinic_qp *qp; - u16 prod_idx; - txq = &nic_dev->txqs[skb->queue_mapping]; + txq = &nic_dev->txqs[q_id]; qp = container_of(txq->sq, struct hinic_qp, sq); if (skb->len < MIN_SKB_LEN) { @@ -236,15 +510,23 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) process_sq_wqe: hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); + err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info); + if (err) + goto offload_error; + hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); flush_skbs: - netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping); + netdev_txq = netdev_get_tx_queue(netdev, q_id); if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq))) hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); return err; +offload_error: + hinic_sq_return_wqe(txq->sq, wqe_size); + tx_unmap_skb(nic_dev, skb, txq->sges); + skb_error: dev_kfree_skb_any(skb); @@ -252,7 +534,8 @@ update_error_stats: u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.tx_dropped++; u64_stats_update_end(&txq->txq_stats.syncp); - return err; + + return NETDEV_TX_OK; } /** diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 03f64f40b2a3..3baabdc89726 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -778,12 +778,11 @@ static void check_sqs(struct ehea_port *port) { struct ehea_swqe *swqe; int swqe_index; - int i, k; + int i; for (i = 0; i < port->num_def_qps; i++) { struct ehea_port_res *pr = &port->port_res[i]; int ret; - k = 0; swqe = ehea_get_swqe(pr->qp, &swqe_index); memset(swqe, 0, SWQE_HEADER_SIZE); atomic_dec(&pr->swqe_avail); @@ -2027,7 +2026,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, dev_consume_skb_any(skb); } -static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); struct ehea_swqe *swqe; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index a0820f72b25c..5e4e37132bf2 100644 --- 
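Two details in the transmit-path hunks above are easy to miss: hinic_tx_offload() rejects packets whose payload offset exceeds MAX_PAYLOAD_OFFSET (221 bytes) and raises any MSS below HINIC_MSS_MIN (0x50, i.e. 80 bytes) to that floor; and when the offload setup fails, hinic_sq_return_wqe() hands the just-claimed WQE back before the skb is dropped, after which the handler reports NETDEV_TX_OK, which is why the last hunk changes "return err" to "return NETDEV_TX_OK". A small sketch of that general rule (not code from this patch):

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (skb->len < MIN_SKB_LEN) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			/* the skb has been consumed, so never return NETDEV_TX_BUSY here */
			return NETDEV_TX_OK;
		}
		/* ... map and queue the skb to hardware ... */
		return NETDEV_TX_OK;
	}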
a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -125,7 +125,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, struct ehea_cq *cq; struct h_epa epa; u64 *cq_handle_ref, hret, rpage; - u32 act_nr_of_entries, act_pages, counter; + u32 counter; int ret; void *vpage; @@ -140,8 +140,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, cq->adapter = adapter; cq_handle_ref = &cq->fw_handle; - act_nr_of_entries = 0; - act_pages = 0; hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, &cq->fw_handle, &cq->epas); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 129f4e9f38da..760b2ad8e295 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -423,7 +423,7 @@ static void emac_hash_mc(struct emac_instance *dev) { const int regs = EMAC_XAHT_REGS(dev); u32 *gaht_base = emac_gaht_base(dev); - u32 gaht_temp[regs]; + u32 gaht_temp[EMAC_XAHT_MAX_REGS]; struct netdev_hw_addr *ha; int i; @@ -1409,7 +1409,7 @@ static inline u16 emac_tx_csum(struct emac_instance *dev, return 0; } -static inline int emac_xmit_finish(struct emac_instance *dev, int len) +static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len) { struct emac_regs __iomem *p = dev->emacp; struct net_device *ndev = dev->ndev; @@ -1436,7 +1436,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len) } /* Tx lock BH */ -static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); unsigned int len = skb->len; @@ -1494,7 +1494,8 @@ static inline int emac_xmit_split(struct emac_instance *dev, int slot, } /* Tx lock BH disabled (SG version for TAH equipped EMACs) */ -static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); int nr_frags = skb_shinfo(skb)->nr_frags; @@ -2969,6 +2970,10 @@ static int emac_init_config(struct emac_instance *dev) dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT; } + /* This should never happen */ + if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS)) + return -ENXIO; + DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE); DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige); DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 369de2cfb15b..84caa4a3fc52 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -390,6 +390,9 @@ static inline int emac_has_feature(struct emac_instance *dev, #define EMAC4SYNC_XAHT_SLOTS_SHIFT 8 #define EMAC4SYNC_XAHT_WIDTH_SHIFT 5 +/* The largest span between slots and widths above is 3 */ +#define EMAC_XAHT_MAX_REGS (1 << 3) + #define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift) #define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift) #define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \ diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h index eeade2ea8334..e4c20f0024f6 100644 --- a/drivers/net/ethernet/ibm/emac/mal.h +++ b/drivers/net/ethernet/ibm/emac/mal.h @@ -136,7 +136,7 @@ static inline int mal_rx_size(int len) static inline 
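The emac change above trades a variable-length array for a fixed-size one: EMAC_XAHT_REGS(dev) evaluates to 1 << (xaht_slots_shift - xaht_width_shift), and with the EMAC4SYNC values shown (shifts of 8 and 5) the largest span is 3, hence EMAC_XAHT_MAX_REGS = 1 << 3 = 8; the new WARN_ON() in emac_init_config() guards that bound. Restated as a sketch:

	/* worst case with the shifts above: 1 << (8 - 5) == 8 registers */
	u32 gaht_temp[EMAC_XAHT_MAX_REGS];	/* replaces the VLA u32 gaht_temp[regs] */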
int mal_tx_chunks(int len) { - return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE; + return DIV_ROUND_UP(len, MAL_MAX_TX_SIZE); } #define MAL_CHAN_MASK(n) (0x80000000 >> (n)) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 525d8b89187b..a4681780a55d 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -24,7 +24,6 @@ */ #include <linux/module.h> -#include <linux/moduleparam.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/dma-mapping.h> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 699ef942b615..7893beffcc71 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1428,7 +1428,7 @@ static int ibmvnic_xmit_workarounds(struct sk_buff *skb, return 0; } -static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); int queue_num = skb_get_queue_mapping(skb); @@ -1452,7 +1452,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) u64 *handle_array; int index = 0; u8 proto = 0; - int ret = 0; + netdev_tx_t ret = NETDEV_TX_OK; if (adapter->resetting) { if (!netif_subqueue_stopped(netdev, skb)) @@ -2348,8 +2348,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev, { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; - ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; + if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { + ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; + ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; + } else { + ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ; + ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ; + } ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; @@ -2362,21 +2367,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int ret; - if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || - ring->tx_pending > adapter->max_tx_entries_per_subcrq) { - netdev_err(netdev, "Invalid request.\n"); - netdev_err(netdev, "Max tx buffers = %llu\n", - adapter->max_rx_add_entries_per_subcrq); - netdev_err(netdev, "Max rx buffers = %llu\n", - adapter->max_tx_entries_per_subcrq); - return -EINVAL; - } - + ret = 0; adapter->desired.rx_entries = ring->rx_pending; adapter->desired.tx_entries = ring->tx_pending; - return wait_for_reset(adapter); + ret = wait_for_reset(adapter); + + if (!ret && + (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || + adapter->req_tx_entries_per_subcrq != ring->tx_pending)) + netdev_info(netdev, + "Could not match full ringsize request. 
Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", + ring->rx_pending, ring->tx_pending, + adapter->req_rx_add_entries_per_subcrq, + adapter->req_tx_entries_per_subcrq); + return ret; } static void ibmvnic_get_channels(struct net_device *netdev, @@ -2384,8 +2391,14 @@ static void ibmvnic_get_channels(struct net_device *netdev, { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - channels->max_rx = adapter->max_rx_queues; - channels->max_tx = adapter->max_tx_queues; + if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { + channels->max_rx = adapter->max_rx_queues; + channels->max_tx = adapter->max_tx_queues; + } else { + channels->max_rx = IBMVNIC_MAX_QUEUES; + channels->max_tx = IBMVNIC_MAX_QUEUES; + } + channels->max_other = 0; channels->max_combined = 0; channels->rx_count = adapter->req_rx_queues; @@ -2398,11 +2411,23 @@ static int ibmvnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int ret; + ret = 0; adapter->desired.rx_queues = channels->rx_count; adapter->desired.tx_queues = channels->tx_count; - return wait_for_reset(adapter); + ret = wait_for_reset(adapter); + + if (!ret && + (adapter->req_rx_queues != channels->rx_count || + adapter->req_tx_queues != channels->tx_count)) + netdev_info(netdev, + "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", + channels->rx_count, channels->tx_count, + adapter->req_rx_queues, adapter->req_tx_queues); + return ret; + } static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -2410,32 +2435,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) struct ibmvnic_adapter *adapter = netdev_priv(dev); int i; - if (stringset != ETH_SS_STATS) - return; + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); + i++, data += ETH_GSTRING_LEN) + memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); - for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) - memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); + for (i = 0; i < adapter->req_tx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + data += ETH_GSTRING_LEN; - for (i = 0; i < adapter->req_tx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); - data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); + data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); - data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, + "tx%d_dropped_packets", i); + data += ETH_GSTRING_LEN; + } - snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); - data += ETH_GSTRING_LEN; - } + for (i = 0; i < adapter->req_rx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); + data += ETH_GSTRING_LEN; - for (i = 0; i < adapter->req_rx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); - data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); + data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); - data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); + data += ETH_GSTRING_LEN; + } + break; - snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); - data += ETH_GSTRING_LEN; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++) + strcpy(data + i * ETH_GSTRING_LEN, + ibmvnic_priv_flags[i]); + break; + default: + return; } } @@ -2448,6 +2484,8 @@ static int 
ibmvnic_get_sset_count(struct net_device *dev, int sset) return ARRAY_SIZE(ibmvnic_stats) + adapter->req_tx_queues * NUM_TX_STATS + adapter->req_rx_queues * NUM_RX_STATS; + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(ibmvnic_priv_flags); default: return -EOPNOTSUPP; } @@ -2498,6 +2536,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, } } +static u32 ibmvnic_get_priv_flags(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + return adapter->priv_flags; +} + +static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES); + + if (which_maxes) + adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; + else + adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; + + return 0; +} static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_drvinfo = ibmvnic_get_drvinfo, .get_msglevel = ibmvnic_get_msglevel, @@ -2511,6 +2568,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, .get_link_ksettings = ibmvnic_get_link_ksettings, + .get_priv_flags = ibmvnic_get_priv_flags, + .set_priv_flags = ibmvnic_set_priv_flags, }; /* Routines for managing CRQs/sCRQs */ diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index f06eec145ca6..18103b811d4d 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -39,7 +39,8 @@ #define IBMVNIC_RX_WEIGHT 16 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */ #define IBMVNIC_BUFFS_PER_POOL 100 -#define IBMVNIC_MAX_QUEUES 10 +#define IBMVNIC_MAX_QUEUES 16 +#define IBMVNIC_MAX_QUEUE_SZ 4096 #define IBMVNIC_TSO_BUF_SZ 65536 #define IBMVNIC_TSO_BUFS 64 @@ -48,6 +49,11 @@ #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE) #define IBMVNIC_BUFFER_HLEN 500 +static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = { +#define IBMVNIC_USE_SERVER_MAXES 0x1 + "use-server-maxes" +}; + struct ibmvnic_login_buffer { __be32 len; __be32 version; @@ -969,6 +975,7 @@ struct ibmvnic_adapter { struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl; dma_addr_t ip_offload_ctrl_tok; u32 msg_enable; + u32 priv_flags; /* Vital Product Data (VPD) */ struct ibmvnic_vpd *vpd; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 1ab613eb5796..fd3373d82a9e 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -68,6 +68,9 @@ config E1000E <http://support.intel.com> + More specific information on configuring the driver is in + <file:Documentation/networking/e1000e.rst>. + To compile this driver as a module, choose M here. The module will be called e1000e. @@ -94,7 +97,7 @@ config IGB <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/e1000.rst>. + <file:Documentation/networking/igb.rst>. To compile this driver as a module, choose M here. The module will be called igb. @@ -130,7 +133,7 @@ config IGBVF <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/e1000.rst>. + <file:Documentation/networking/igbvf.rst>. To compile this driver as a module, choose M here. The module will be called igbvf. 
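The ibmvnic hunks above add a single ethtool private flag, "use-server-maxes": when it is clear (the default), get_ringparam()/get_channels() report the driver's own ceilings (IBMVNIC_MAX_QUEUE_SZ = 4096, IBMVNIC_MAX_QUEUES = 16) rather than the server-provided maxima, set_ringparam() no longer rejects requests above those maxima, and both set_ringparam() and set_channels() warn via netdev_info() when the negotiated result falls short of the request. From userspace the flag is toggled with "ethtool --set-priv-flags <dev> use-server-maxes on", which corresponds to bit 0 of the private-flags word. A sketch of the same request at the ioctl level (assumed, reusing the SIOCETHTOOL plumbing from the RX flow-hash example earlier):

	struct ethtool_value eval = {
		.cmd = ETHTOOL_SPFLAGS,
		.data = 0x1,	/* bit 0 == "use-server-maxes" (IBMVNIC_USE_SERVER_MAXES) */
	};
	/* point ifr.ifr_data at &eval and issue ioctl(fd, SIOCETHTOOL, &ifr) */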
@@ -147,7 +150,7 @@ config IXGB <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/ixgb.txt>. + <file:Documentation/networking/ixgb.rst>. To compile this driver as a module, choose M here. The module will be called ixgb. @@ -164,6 +167,9 @@ config IXGBE <http://support.intel.com> + More specific information on configuring the driver is in + <file:Documentation/networking/ixgbe.rst>. + To compile this driver as a module, choose M here. The module will be called ixgbe. @@ -205,7 +211,7 @@ config IXGBEVF <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/ixgbevf.txt>. + <file:Documentation/networking/ixgbevf.rst>. To compile this driver as a module, choose M here. The module will be called ixgbevf. MSI-X interrupt support is required @@ -222,6 +228,9 @@ config I40E <http://support.intel.com> + More specific information on configuring the driver is in + <file:Documentation/networking/i40e.rst>. + To compile this driver as a module, choose M here. The module will be called i40e. @@ -235,20 +244,30 @@ config I40E_DCB If unsure, say N. +# this is here to allow seamless migration from I40EVF --> IAVF name +# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF +config IAVF + tristate config I40EVF tristate "Intel(R) Ethernet Adaptive Virtual Function support" + select IAVF depends on PCI_MSI ---help--- This driver supports virtual functions for Intel XL710, - X710, X722, and all devices advertising support for Intel - Ethernet Adaptive Virtual Function devices. For more + X710, X722, XXV710, and all devices advertising support for + Intel Ethernet Adaptive Virtual Function devices. For more information on how to identify your adapter, go to the Adapter & Driver ID Guide that can be located at: - <http://support.intel.com> + <https://support.intel.com> + + This driver was formerly named i40evf. + + More specific information on configuring the driver is in + <file:Documentation/networking/iavf.rst>. To compile this driver as a module, choose M here. The module - will be called i40evf. MSI-X interrupt support is required + will be called iavf. MSI-X interrupt support is required for this driver to work correctly. config ICE @@ -262,6 +281,9 @@ config ICE <http://support.intel.com> + More specific information on configuring the driver is in + <file:Documentation/networking/ice.rst>. + To compile this driver as a module, choose M here. The module will be called ice. @@ -277,7 +299,26 @@ config FM10K <http://support.intel.com> + More specific information on configuring the driver is in + <file:Documentation/networking/fm10k.rst>. + To compile this driver as a module, choose M here. The module will be called fm10k. MSI-X interrupt support is required +config IGC + tristate "Intel(R) Ethernet Controller I225-LM/I225-V support" + default n + depends on PCI + ---help--- + This driver supports Intel(R) Ethernet Controller I225-LM/I225-V + family of adapters. + + For more information on how to identify your adapter, go + to the Adapter & Driver ID Guide that can be located at: + + <http://support.intel.com> + + To compile this driver as a module, choose M here. The module + will be called igc. 
+ endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index 807a4f8c7e4e..3075290063f6 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -7,11 +7,12 @@ obj-$(CONFIG_E100) += e100.o obj-$(CONFIG_E1000) += e1000/ obj-$(CONFIG_E1000E) += e1000e/ obj-$(CONFIG_IGB) += igb/ +obj-$(CONFIG_IGC) += igc/ obj-$(CONFIG_IGBVF) += igbvf/ obj-$(CONFIG_IXGBE) += ixgbe/ obj-$(CONFIG_IXGBEVF) += ixgbevf/ obj-$(CONFIG_I40E) += i40e/ obj-$(CONFIG_IXGB) += ixgb/ -obj-$(CONFIG_I40EVF) += i40evf/ +obj-$(CONFIG_IAVF) += iavf/ obj-$(CONFIG_FM10K) += fm10k/ obj-$(CONFIG_ICE) += ice/ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 27d5f27163d2..7c4b55482f72 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -164,7 +164,7 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); MODULE_FIRMWARE(FIRMWARE_D101M); MODULE_FIRMWARE(FIRMWARE_D101S); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 2110d5f2da19..43b6d3cec3b3 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -195,7 +195,7 @@ static struct pci_driver e1000_driver = { MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@ -2433,7 +2433,6 @@ static void e1000_watchdog(struct work_struct *work) if (link) { if (!netif_carrier_ok(netdev)) { u32 ctrl; - bool txb2b = true; /* update snapshot of PHY registers on LSC */ e1000_get_speed_and_duplex(hw, &adapter->link_speed, @@ -2455,11 +2454,9 @@ static void e1000_watchdog(struct work_struct *work) adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: - txb2b = false; adapter->tx_timeout_factor = 16; break; case SPEED_100: - txb2b = false; /* maybe add some timeout factor ? 
*/ break; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 3ba0c90e7055..16a73bd9f4cb 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6854,8 +6854,6 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_RECOVERED; } - pci_cleanup_aer_uncorrect_error_status(pdev); - return result; } @@ -7592,7 +7590,7 @@ module_exit(e1000_exit_module); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); /* netdev.c */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 3f536541f45f..503bbc017792 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -21,7 +21,7 @@ static const char fm10k_copyright[] = MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); /* single workqueue for entire fm10k driver */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index c859ababeed5..02345d381303 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2440,8 +2440,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_RECOVERED; } - pci_cleanup_aer_uncorrect_error_status(pdev); - return result; } diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 14397e7e9925..50590e8d1fd1 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -22,6 +22,7 @@ i40e-objs := i40e_main.o \ i40e_txrx.o \ i40e_ptp.o \ i40e_client.o \ - i40e_virtchnl_pf.o + i40e_virtchnl_pf.o \ + i40e_xsk.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7a80652e2500..876cac317e79 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -786,6 +786,11 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); + + /* AF_XDP zero-copy */ + struct xdp_umem **xsk_umems; + u16 num_xsk_umems_used; + u16 num_xsk_umems; } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -1090,6 +1095,20 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) return !!vsi->xdp_prog; } +static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) +{ + bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); + int qid = ring->queue_index; + + if (ring_is_xdp(ring)) + qid -= ring->vsi->alloc_queue_pairs; + + if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on) + return NULL; + + return ring->vsi->xsk_umems[qid]; +} + int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 56b911a5dd8b..a20d1cf058ad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -132,8 +132,6 @@ 
static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", (unsigned long int)nd->vlan_features); } - dev_info(&pf->pdev->dev, " active_vlans is %s\n", - vsi->active_vlans ? "<valid>" : "<null>"); dev_info(&pf->pdev->dev, " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5ff6caa83948..9f8464f80783 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -5,26 +5,227 @@ #include "i40e.h" #include "i40e_diag.h" +#include "i40e_txrx_common.h" +/* ethtool statistics helpers */ + +/** + * struct i40e_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the I40E_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the i40e_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the i40e_add_stat_string() helper function. + **/ struct i40e_stats { - /* The stat_string is expected to be a format string formatted using - * vsnprintf by i40e_add_stat_strings. Every member of a stats array - * should use the same format specifiers as they will be formatted - * using the same variadic arguments. - */ char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; +/* Helper macro to define an i40e_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. + */ #define I40E_STAT(_type, _name, _stat) { \ .stat_string = _name, \ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. 
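To make the offsetof()/FIELD_SIZEOF() mechanism documented above concrete, here is roughly what one of the queue-stat entries defined a little further down expands to, and how its value is later recovered. This is an illustration of the macro only, not an addition to the patch:

        /* I40E_QUEUE_STAT("%s-%u.packets", stats.packets), i.e.
         * I40E_STAT(struct i40e_ring, "%s-%u.packets", stats.packets),
         * expands to roughly this initializer:
         */
        {
                .stat_string = "%s-%u.packets",
                .sizeof_stat = FIELD_SIZEOF(struct i40e_ring, stats.packets), /* sizeof(u64) */
                .stat_offset = offsetof(struct i40e_ring, stats.packets),
        }
        /* i40e_add_one_ethtool_stat() then reads the value back from any
         * base pointer of the declared type:
         *      *(u64 *)((char *)ring + stat->stat_offset)
         */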
+ */ #define I40E_NETDEV_STAT(_net_stat) \ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +/* Helper macro for defining some statistics related to queues */ +#define I40E_QUEUE_STAT(_name, _stat) \ + I40E_STAT(struct i40e_ring, _name, _stat) + +/* Stats associated with a Tx or Rx ring */ +static const struct i40e_stats i40e_gstrings_queue_stats[] = { + I40E_QUEUE_STAT("%s-%u.packets", stats.packets), + I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes), +}; + +/** + * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pointer: basis for where to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. Used to implement i40e_add_ethtool_stats and + * i40e_add_queue_stats. If the pointer is null, data will be zero'd. + */ +static void +i40e_add_one_ethtool_stat(u64 *data, void *pointer, + const struct i40e_stats *stat) +{ + char *p; + + if (!pointer) { + /* ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. + */ + *data = 0; + return; + } + + p = (char *)pointer + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location to copy stats from + * @stats: array of stats to copy + * @size: the size of the stats definition + * + * Copy the stats defined by the stats array using the pointer as a base into + * the data buffer supplied by ethtool. Updates the data pointer to point to + * the next empty location for successive calls to __i40e_add_ethtool_stats. + * If pointer is null, set the data values to zero and update the pointer to + * skip these stats. + **/ +static void +__i40e_add_ethtool_stats(u64 **data, void *pointer, + const struct i40e_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); +} + +/** + * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location where stats are stored + * @stats: static const array of stat definitions + * + * Macro to ease the use of __i40e_add_ethtool_stats by taking a static + * constant stats array and passing the ARRAY_SIZE(). This avoids typos by + * ensuring that we pass the size associated with the given stats array. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. + **/ +#define i40e_add_ethtool_stats(data, pointer, stats) \ + __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + +/** + * i40e_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @ring: the ring to copy + * + * Queue statistics must be copied while protected by + * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. + * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the + * ring pointer is null, zero out the queue stat values and update the data + * pointer. 
Otherwise safely copy the stats from the ring into the supplied + * buffer and update the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + **/ +static void +i40e_add_queue_stats(u64 **data, struct i40e_ring *ring) +{ + const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats); + const struct i40e_stats *stats = i40e_gstrings_queue_stats; + unsigned int start; + unsigned int i; + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry_irq. But first, make sure our ring is + * non-null before attempting to access its syncp. + */ + do { + start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); + for (i = 0; i < size; i++) { + i40e_add_one_ethtool_stat(&(*data)[i], ring, + &stats[i]); + } + } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + +/** + * __i40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + **/ +static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +/** + * 40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. + **/ +#define i40e_add_stat_strings(p, stats, ...) \ + __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) + #define I40E_PF_STAT(_name, _stat) \ I40E_STAT(struct i40e_pf, _name, _stat) #define I40E_VSI_STAT(_name, _stat) \ @@ -33,6 +234,8 @@ struct i40e_stats { I40E_STAT(struct i40e_veb, _name, _stat) #define I40E_PFC_STAT(_name, _stat) \ I40E_STAT(struct i40e_pfc_stats, _name, _stat) +#define I40E_QUEUE_STAT(_name, _stat) \ + I40E_STAT(struct i40e_ring, _name, _stat) static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_packets), @@ -171,20 +374,11 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = { I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff), }; -/* We use num_tx_queues here as a proxy for the maximum number of queues - * available because we always allocate queues symmetrically. 
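The retry loop in i40e_add_queue_stats() above is the reader half of the kernel's u64_stats seqcount pattern; the writer half appears later in this patch as i40e_update_tx_stats()/i40e_update_rx_stats(). A minimal sketch of the pairing, using the same ring fields the driver uses (ring is the struct i40e_ring being updated or read):

        /* writer side, Tx/Rx clean path */
        u64_stats_update_begin(&ring->syncp);
        ring->stats.packets += total_packets;
        ring->stats.bytes += total_bytes;
        u64_stats_update_end(&ring->syncp);

        /* reader side, ethtool: re-read until a consistent snapshot is seen */
        do {
                start = u64_stats_fetch_begin_irq(&ring->syncp);
                packets = ring->stats.packets;
                bytes = ring->stats.bytes;
        } while (u64_stats_fetch_retry_irq(&ring->syncp, start));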
- */ -#define I40E_MAX_NUM_QUEUES(n) ((n)->num_tx_queues) -#define I40E_QUEUE_STATS_LEN(n) \ - (I40E_MAX_NUM_QUEUES(n) \ - * 2 /* Tx and Rx together */ \ - * (sizeof(struct i40e_queue_stats) / sizeof(u64))) -#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) + #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) -#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ - I40E_MISC_STATS_LEN + \ - I40E_QUEUE_STATS_LEN((n))) + +#define I40E_VSI_STATS_LEN (I40E_NETDEV_STATS_LEN + I40E_MISC_STATS_LEN) #define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \ I40E_MAX_USER_PRIORITY) @@ -193,10 +387,15 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = { (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \ I40E_MAX_TRAFFIC_CLASS)) -#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ +#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) + +#define I40E_PF_STATS_LEN (I40E_GLOBAL_STATS_LEN + \ I40E_PFC_STATS_LEN + \ I40E_VEB_STATS_LEN + \ - I40E_VSI_STATS_LEN((n))) + I40E_VSI_STATS_LEN) + +/* Length of stats for a single queue */ +#define I40E_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats) enum i40e_ethtool_test_id { I40E_ETH_TEST_REG = 0, @@ -1512,6 +1711,13 @@ static int i40e_set_ringparam(struct net_device *netdev, (new_rx_count == vsi->rx_rings[0]->count)) return 0; + /* If there is a AF_XDP UMEM attached to any of Rx rings, + * disallow changing the number of descriptors -- regardless + * if the netdev is running or not. + */ + if (i40e_xsk_any_rx_ring_enabled(vsi)) + return -EBUSY; + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { timeout--; if (!timeout) @@ -1701,11 +1907,30 @@ static int i40e_get_stats_count(struct net_device *netdev) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + int stats_len; if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) - return I40E_PF_STATS_LEN(netdev); + stats_len = I40E_PF_STATS_LEN; else - return I40E_VSI_STATS_LEN(netdev); + stats_len = I40E_VSI_STATS_LEN; + + /* The number of stats reported for a given net_device must remain + * constant throughout the life of that device. + * + * This is because the API for obtaining the size, strings, and stats + * is spread out over three separate ethtool ioctls. There is no safe + * way to lock the number of stats across these calls, so we must + * assume that they will never change. + * + * Due to this, we report the maximum number of queues, even if not + * every queue is currently configured. Since we always allocate + * queues in pairs, we'll just use netdev->num_tx_queues * 2. This + * works because the num_tx_queues is set at device creation and never + * changes. + */ + stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues; + + return stats_len; } static int i40e_get_sset_count(struct net_device *netdev, int sset) @@ -1728,89 +1953,6 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } /** - * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer - * @data: location to store the stat value - * @pointer: basis for where to copy from - * @stat: the stat definition - * - * Copies the stat data defined by the pointer and stat structure pair into - * the memory supplied as data. Used to implement i40e_add_ethtool_stats. - * If the pointer is null, data will be zero'd. 
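The comment added to i40e_get_stats_count() above is the key constraint: ethtool fetches the count, the strings, and the values through three separate ioctls, so all three paths must be computed from one fixed layout. As a worked example (queue count chosen purely for illustration): a netdev created with num_tx_queues = 8 always reports I40E_QUEUE_STATS_LEN * 2 * 8 = 2 * 2 * 8 = 32 per-queue entries on top of I40E_VSI_STATS_LEN or I40E_PF_STATS_LEN, whether or not all eight queue pairs are currently configured; rings that do not exist are simply reported as zeroes by i40e_add_queue_stats().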
- */ -static inline void -i40e_add_one_ethtool_stat(u64 *data, void *pointer, - const struct i40e_stats *stat) -{ - char *p; - - if (!pointer) { - /* ensure that the ethtool data buffer is zero'd for any stats - * which don't have a valid pointer. - */ - *data = 0; - return; - } - - p = (char *)pointer + stat->stat_offset; - switch (stat->sizeof_stat) { - case sizeof(u64): - *data = *((u64 *)p); - break; - case sizeof(u32): - *data = *((u32 *)p); - break; - case sizeof(u16): - *data = *((u16 *)p); - break; - case sizeof(u8): - *data = *((u8 *)p); - break; - default: - WARN_ONCE(1, "unexpected stat size for %s", - stat->stat_string); - *data = 0; - } -} - -/** - * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location to copy stats from - * @stats: array of stats to copy - * @size: the size of the stats definition - * - * Copy the stats defined by the stats array using the pointer as a base into - * the data buffer supplied by ethtool. Updates the data pointer to point to - * the next empty location for successive calls to __i40e_add_ethtool_stats. - * If pointer is null, set the data values to zero and update the pointer to - * skip these stats. - **/ -static inline void -__i40e_add_ethtool_stats(u64 **data, void *pointer, - const struct i40e_stats stats[], - const unsigned int size) -{ - unsigned int i; - - for (i = 0; i < size; i++) - i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); -} - -/** - * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location where stats are stored - * @stats: static const array of stat definitions - * - * Macro to ease the use of __i40e_add_ethtool_stats by taking a static - * constant stats array and passing the ARRAY_SIZE(). This avoids typos by - * ensuring that we pass the size associated with the given stats array. - * Assumes that stats is an array. 
- **/ -#define i40e_add_ethtool_stats(data, pointer, stats) \ - __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) - -/** * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure * @pf: the PF device structure * @i: the priority value to copy @@ -1853,12 +1995,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_veb *veb = pf->veb[pf->lan_veb]; unsigned int i; - unsigned int start; bool veb_stats; u64 *p = data; @@ -1870,38 +2010,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats); rcu_read_lock(); - for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev) ; i++) { - tx_ring = READ_ONCE(vsi->tx_rings[i]); - - if (!tx_ring) { - /* Bump the stat counter to skip these stats, and make - * sure the memory is zero'd - */ - *(data++) = 0; - *(data++) = 0; - *(data++) = 0; - *(data++) = 0; - continue; - } - - /* process Tx ring statistics */ - do { - start = u64_stats_fetch_begin_irq(&tx_ring->syncp); - data[0] = tx_ring->stats.packets; - data[1] = tx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); - data += 2; - - /* Rx ring is the 2nd half of the queue pair */ - rx_ring = &tx_ring[1]; - do { - start = u64_stats_fetch_begin_irq(&rx_ring->syncp); - data[0] = rx_ring->stats.packets; - data[1] = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); - data += 2; + for (i = 0; i < netdev->num_tx_queues; i++) { + i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i])); + i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i])); } rcu_read_unlock(); + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) goto check_data_pointer; @@ -1933,42 +2047,6 @@ check_data_pointer: } /** - * __i40e_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * @size: size of the stats array - * - * Format and copy the strings described by stats into the buffer pointed at - * by p. - **/ -static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], - const unsigned int size, ...) -{ - unsigned int i; - - for (i = 0; i < size; i++) { - va_list args; - - va_start(args, size); - vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); - *p += ETH_GSTRING_LEN; - va_end(args); - } -} - -/** - * 40e_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * - * Format and copy the strings described by the const static stats value into - * the buffer pointed at by p. Assumes that stats can have ARRAY_SIZE called - * for it. - **/ -#define i40e_add_stat_strings(p, stats, ...) 
\ - __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) - -/** * i40e_get_stat_strings - copy stat strings into supplied buffer * @netdev: the netdev to collect strings for * @data: supplied buffer to copy strings into @@ -1990,16 +2068,13 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) i40e_add_stat_strings(&data, i40e_gstrings_misc_stats); - for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) { - snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); - data += ETH_GSTRING_LEN; + for (i = 0; i < netdev->num_tx_queues; i++) { + i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, + "tx", i); + i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, + "rx", i); } + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ac685ad4d877..bc71a21c1dc2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9,7 +9,9 @@ /* Local includes */ #include "i40e.h" #include "i40e_diag.h" +#include "i40e_xsk.h" #include <net/udp_tunnel.h> +#include <net/xdp_sock.h> /* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined @@ -89,7 +91,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *i40e_wq; @@ -420,9 +422,9 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); + struct i40e_ring *ring; int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) @@ -436,24 +438,26 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, u64 bytes, packets; unsigned int start; - tx_ring = READ_ONCE(vsi->tx_rings[i]); - if (!tx_ring) + ring = READ_ONCE(vsi->tx_rings[i]); + if (!ring) continue; - i40e_get_netdev_stats_struct_tx(tx_ring, stats); + i40e_get_netdev_stats_struct_tx(ring, stats); - rx_ring = &tx_ring[1]; + if (i40e_enabled_xdp_vsi(vsi)) { + ring++; + i40e_get_netdev_stats_struct_tx(ring, stats); + } + ring++; do { - start = u64_stats_fetch_begin_irq(&rx_ring->syncp); - packets = rx_ring->stats.packets; - bytes = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; - if (i40e_enabled_xdp_vsi(vsi)) - i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats); } rcu_read_unlock(); @@ -1528,8 +1532,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) return 0; } - if (test_bit(__I40E_VSI_DOWN, vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, 
vsi->back->state)) + if (test_bit(__I40E_DOWN, pf->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return -EADDRNOTAVAIL; if (ether_addr_equal(hw->mac.addr, addr->sa_data)) @@ -1553,8 +1557,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p) if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; - ret = i40e_aq_mac_address_write(&vsi->back->hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, + ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); if (ret) netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n", @@ -1565,7 +1568,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p) /* schedule our worker thread which will take care of * applying the new filter changes */ - i40e_service_event_schedule(vsi->back); + i40e_service_event_schedule(pf); return 0; } @@ -3072,6 +3075,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) i40e_status err = 0; u32 qtx_ctl = 0; + if (ring_is_xdp(ring)) + ring->xsk_umem = i40e_xsk_umem(ring); + /* some ATR related tx ring init */ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { ring->atr_sample_rate = vsi->back->atr_sample_rate; @@ -3181,13 +3187,46 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_rxq rx_ctx; i40e_status err = 0; + bool ok; + int ret; bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(rx_ctx)); - ring->rx_buf_len = vsi->rx_buf_len; + if (ring->vsi->type == I40E_VSI_MAIN) + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + + ring->xsk_umem = i40e_xsk_umem(ring); + if (ring->xsk_umem) { + ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + /* For AF_XDP ZC, we disallow packets to span on + * multiple buffers, thus letting us skip that + * handling in the fast-path. + */ + chain_len = 1; + ring->zca.free = i40e_zca_free; + ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_ZERO_COPY, + &ring->zca); + if (ret) + return ret; + dev_info(&vsi->back->pdev->dev, + "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n", + ring->queue_index); + + } else { + ring->rx_buf_len = vsi->rx_buf_len; + if (ring->vsi->type == I40E_VSI_MAIN) { + ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL); + if (ret) + return ret; + } + } rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); @@ -3243,7 +3282,15 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + ok = ring->xsk_umem ? + i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : + !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + if (!ok) { + dev_info(&vsi->back->pdev->dev, + "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", + ring->xsk_umem ? 
"UMEM enabled " : "", + ring->queue_index, pf_q); + } return 0; } @@ -6384,7 +6431,10 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) char *req_fec = ""; char *an = ""; - new_speed = pf->hw.phy.link_info.link_speed; + if (isup) + new_speed = pf->hw.phy.link_info.link_speed; + else + new_speed = I40E_LINK_SPEED_UNKNOWN; if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) return; @@ -6568,6 +6618,24 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) struct i40e_hw *hw = &pf->hw; i40e_status err; u64 mask; + u8 speed; + + /* Card might've been put in an unstable state by other drivers + * and applications, which causes incorrect speed values being + * set on startup. In order to clear speed registers, we call + * get_phy_capabilities twice, once to get initial state of + * available speeds, and once to get current PHY config. + */ + err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, + NULL); + if (err) { + dev_err(&pf->pdev->dev, + "failed to get phy cap., ret = %s last_status = %s\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return err; + } + speed = abilities.link_speed; /* Get the current phy config */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, @@ -6581,9 +6649,9 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) } /* If link needs to go up, but was not forced to go down, - * no need for a flap + * and its speed values are OK, no need for a flap */ - if (is_up && abilities.phy_type != 0) + if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) return I40E_SUCCESS; /* To force link we need to set bits for all supported PHY types, @@ -6595,7 +6663,10 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; - config.link_speed = abilities.link_speed; + if (abilities.link_speed != 0) + config.link_speed = abilities.link_speed; + else + config.link_speed = speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; @@ -8440,14 +8511,9 @@ static void i40e_link_event(struct i40e_pf *pf) i40e_status status; bool new_link, old_link; - /* save off old link status information */ - pf->hw.phy.link_info_old = pf->hw.phy.link_info; - /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; - old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); - status = i40e_get_link_status(&pf->hw, &new_link); /* On success, disable temp link polling */ @@ -11828,6 +11894,256 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, } /** + * i40e_enter_busy_conf - Enters busy config state + * @vsi: vsi + * + * Returns 0 on success, <0 for failure. 
+ **/ +static int i40e_enter_busy_conf(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int timeout = 50; + + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + + return 0; +} + +/** + * i40e_exit_busy_conf - Exits busy config state + * @vsi: vsi + **/ +static void i40e_exit_busy_conf(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + clear_bit(__I40E_CONFIG_BUSY, pf->state); +} + +/** + * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair + * @vsi: vsi + * @queue_pair: queue pair + **/ +static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) +{ + memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, + sizeof(vsi->rx_rings[queue_pair]->rx_stats)); + memset(&vsi->tx_rings[queue_pair]->stats, 0, + sizeof(vsi->tx_rings[queue_pair]->stats)); + if (i40e_enabled_xdp_vsi(vsi)) { + memset(&vsi->xdp_rings[queue_pair]->stats, 0, + sizeof(vsi->xdp_rings[queue_pair]->stats)); + } +} + +/** + * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair + * @vsi: vsi + * @queue_pair: queue pair + **/ +static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) +{ + i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); + if (i40e_enabled_xdp_vsi(vsi)) + i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); +} + +/** + * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * @enable: true for enable, false for disable + **/ +static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair, + bool enable) +{ + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; + struct i40e_q_vector *q_vector = rxr->q_vector; + + if (!vsi->netdev) + return; + + /* All rings in a qp belong to the same qvector. */ + if (q_vector->rx.ring || q_vector->tx.ring) { + if (enable) + napi_enable(&q_vector->napi); + else + napi_disable(&q_vector->napi); + } +} + +/** + * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * @enable: true for enable, false for disable + * + * Returns 0 on success, <0 on failure. + **/ +static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair, + bool enable) +{ + struct i40e_pf *pf = vsi->back; + int pf_q, ret = 0; + + pf_q = vsi->base_queue + queue_pair; + ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, + false /*is xdp*/, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d Tx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); + return ret; + } + + i40e_control_rx_q(pf, pf_q, enable); + ret = i40e_pf_rxq_wait(pf, pf_q, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d Rx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); + return ret; + } + + /* Due to HW errata, on Rx disable only, the register can + * indicate done before it really is. Needs 50ms to be sure + */ + if (!enable) + mdelay(50); + + if (!i40e_enabled_xdp_vsi(vsi)) + return ret; + + ret = i40e_control_wait_tx_q(vsi->seid, pf, + pf_q + vsi->alloc_queue_pairs, + true /*is xdp*/, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d XDP Tx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? 
"en" : "dis")); + } + + return ret; +} + +/** + * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair + * @vsi: vsi + * @queue_pair: queue_pair + **/ +static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) +{ + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + + /* All rings in a qp belong to the same qvector. */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); + else + i40e_irq_dynamic_enable_icr0(pf); + + i40e_flush(hw); +} + +/** + * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair + * @vsi: vsi + * @queue_pair: queue_pair + **/ +static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) +{ + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + + /* For simplicity, instead of removing the qp interrupt causes + * from the interrupt linked list, we simply disable the interrupt, and + * leave the list intact. + * + * All rings in a qp belong to the same qvector. + */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; + + wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); + i40e_flush(hw); + synchronize_irq(pf->msix_entries[intpf].vector); + } else { + /* Legacy and MSI mode - this stops all interrupt handling */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); + wr32(hw, I40E_PFINT_DYN_CTL0, 0); + i40e_flush(hw); + synchronize_irq(pf->pdev->irq); + } +} + +/** + * i40e_queue_pair_disable - Disables a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * + * Returns 0 on success, <0 on failure. + **/ +int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) +{ + int err; + + err = i40e_enter_busy_conf(vsi); + if (err) + return err; + + i40e_queue_pair_disable_irq(vsi, queue_pair); + err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); + i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); + i40e_queue_pair_clean_rings(vsi, queue_pair); + i40e_queue_pair_reset_stats(vsi, queue_pair); + + return err; +} + +/** + * i40e_queue_pair_enable - Enables a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * + * Returns 0 on success, <0 on failure. + **/ +int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair) +{ + int err; + + err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); + if (err) + return err; + + if (i40e_enabled_xdp_vsi(vsi)) { + err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); + if (err) + return err; + } + + err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); + if (err) + return err; + + err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */); + i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */); + i40e_queue_pair_enable_irq(vsi, queue_pair); + + i40e_exit_busy_conf(vsi); + + return err; +} + +/** * i40e_xdp - implements ndo_bpf for i40e * @dev: netdevice * @xdp: XDP command @@ -11847,6 +12163,12 @@ static int i40e_xdp(struct net_device *dev, case XDP_QUERY_PROG: xdp->prog_id = vsi->xdp_prog ? 
vsi->xdp_prog->aux->id : 0; return 0; + case XDP_QUERY_XSK_UMEM: + return i40e_xsk_umem_query(vsi, &xdp->xsk.umem, + xdp->xsk.queue_id); + case XDP_SETUP_XSK_UMEM: + return i40e_xsk_umem_setup(vsi, xdp->xsk.umem, + xdp->xsk.queue_id); default: return -EINVAL; } @@ -11886,6 +12208,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, + .ndo_xsk_async_xmit = i40e_xsk_async_xmit, }; /** @@ -13033,7 +13356,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) break; - if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { + if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { dev_info(&pf->pdev->dev, "vsi seid %d not found\n", vsi_seid); return NULL; @@ -14159,6 +14482,7 @@ static void i40e_remove(struct pci_dev *pdev) mutex_destroy(&hw->aq.asq_mutex); /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { @@ -14167,6 +14491,7 @@ static void i40e_remove(struct pci_dev *pdev) pf->vsi[i] = NULL; } } + rtnl_unlock(); for (i = 0; i < I40E_MAX_VEB; i++) { kfree(pf->veb[i]); @@ -14227,7 +14552,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); pci_ers_result_t result; - int err; u32 reg; dev_dbg(&pdev->dev, "%s\n", __func__); @@ -14248,14 +14572,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_DISCONNECT; } - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - dev_info(&pdev->dev, - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", - err); - /* non-fatal, continue */ - } - return result; } @@ -14378,7 +14694,13 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); + /* Since we're going to destroy queues during the + * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this + * whole section + */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); + rtnl_unlock(); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 35f2866b38c6..1199f0502d6d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -694,7 +694,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) if (!IS_ERR_OR_NULL(pf->ptp_clock)) return 0; - strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name)); + strncpy(pf->ptp_caps.name, i40e_driver_name, + sizeof(pf->ptp_caps.name) - 1); pf->ptp_caps.owner = THIS_MODULE; pf->ptp_caps.max_adj = 999999999; pf->ptp_caps.n_ext_ts = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index b5042d1a63c0..740ea58ba938 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -8,16 +8,8 @@ #include "i40e.h" #include "i40e_trace.h" #include "i40e_prototype.h" - -static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, - u32 td_tag) -{ - return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | - ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | - ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | - ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | - ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); -} +#include "i40e_txrx_common.h" +#include "i40e_xsk.h" #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) /** @@ -536,8 +528,8 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, * This is used to verify if the FD programming or invalidation * requested by SW to the HW is successful or not and take actions accordingly. 
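The one-byte change to the i40e_ptp strncpy() call above is the usual fix for the "no NUL terminator when the source fills the buffer" pitfall. A minimal sketch of the idea, assuming (as in the driver, where the pf structure is zero-allocated) that the destination starts out zeroed; the literal stands in for i40e_driver_name:

        char name[16] = {};

        /* strncpy() does not terminate the string when strlen(src) >= n,
         * so copy at most sizeof(name) - 1 bytes and leave the final,
         * already-zero byte as the terminator.
         */
        strncpy(name, "i40e", sizeof(name) - 1);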
**/ -static void i40e_fd_handle_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, u8 prog_id) +void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) { struct i40e_pf *pf = rx_ring->vsi->back; struct pci_dev *pdev = pf->pdev; @@ -644,13 +636,18 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) unsigned long bi_size; u16 i; - /* ring already cleared, nothing to do */ - if (!tx_ring->tx_bi) - return; + if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) { + i40e_xsk_clean_tx_ring(tx_ring); + } else { + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_bi) + return; - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) - i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + i40e_unmap_and_free_tx_resource(tx_ring, + &tx_ring->tx_bi[i]); + } bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; memset(tx_ring->tx_bi, 0, bi_size); @@ -767,8 +764,6 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi) } } -#define WB_STRIDE 4 - /** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @vsi: the VSI we care about @@ -873,27 +868,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, i += tx_ring->count; tx_ring->next_to_clean = i; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.packets += total_packets; - u64_stats_update_end(&tx_ring->syncp); - tx_ring->q_vector->tx.total_bytes += total_bytes; - tx_ring->q_vector->tx.total_packets += total_packets; - - if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { - /* check to see if there are < 4 descriptors - * waiting to be written back, then kick the hardware to force - * them to be written back in case we stay in NAPI. - * In this mode on X722 we do not enable Interrupt. - */ - unsigned int j = i40e_get_tx_pending(tx_ring, false); - - if (budget && - ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_VSI_DOWN, vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; - } + i40e_update_tx_stats(tx_ring, total_packets, total_bytes); + i40e_arm_wb(tx_ring, vsi, budget); if (ring_is_xdp(tx_ring)) return !!budget; @@ -1244,6 +1220,11 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, new_buff->page = old_buff->page; new_buff->page_offset = old_buff->page_offset; new_buff->pagecnt_bias = old_buff->pagecnt_bias; + + rx_ring->rx_stats.page_reuse_count++; + + /* clear contents of buffer_info */ + old_buff->page = NULL; } /** @@ -1266,7 +1247,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw) } /** - * i40e_clean_programming_status - clean the programming status descriptor + * i40e_clean_programming_status - try clean the programming status descriptor * @rx_ring: the rx ring that has this descriptor * @rx_desc: the rx descriptor written back by HW * @qw: qword representing status_error_len in CPU ordering @@ -1275,15 +1256,22 @@ static inline bool i40e_rx_is_programming_status(u64 qw) * status being successful or not and take actions accordingly. FCoE should * handle its context/filter programming/invalidation status and take actions. * + * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL. 
**/ -static void i40e_clean_programming_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - u64 qw) +struct i40e_rx_buffer *i40e_clean_programming_status( + struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + u64 qw) { struct i40e_rx_buffer *rx_buffer; - u32 ntc = rx_ring->next_to_clean; + u32 ntc; u8 id; + if (!i40e_rx_is_programming_status(qw)) + return NULL; + + ntc = rx_ring->next_to_clean; + /* fetch, update, and store next to clean */ rx_buffer = &rx_ring->rx_bi[ntc++]; ntc = (ntc < rx_ring->count) ? ntc : 0; @@ -1291,18 +1279,13 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring, prefetch(I40E_RX_DESC(rx_ring, ntc)); - /* place unused page back on the ring */ - i40e_reuse_rx_page(rx_ring, rx_buffer); - rx_ring->rx_stats.page_reuse_count++; - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; - id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) i40e_fd_handle_status(rx_ring, rx_desc, id); + + return rx_buffer; } /** @@ -1372,6 +1355,11 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) rx_ring->skb = NULL; } + if (rx_ring->xsk_umem) { + i40e_xsk_clean_rx_ring(rx_ring); + goto skip_free; + } + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; @@ -1400,6 +1388,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) rx_bi->page_offset = 0; } +skip_free: bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; memset(rx_ring->rx_bi, 0, bi_size); @@ -1492,7 +1481,7 @@ err: * @rx_ring: ring to bump * @val: new head index **/ -static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; @@ -1576,8 +1565,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, * @skb: packet to send up * @vlan_tag: vlan tag for packet **/ -static void i40e_receive_skb(struct i40e_ring *rx_ring, - struct sk_buff *skb, u16 vlan_tag) +void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) { struct i40e_q_vector *q_vector = rx_ring->q_vector; @@ -1804,7 +1793,6 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
**/ -static inline void i40e_process_skb_fields(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc, struct sk_buff *skb, u8 rx_ptype) @@ -2152,7 +2140,6 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, if (i40e_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); - rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, @@ -2160,10 +2147,9 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); + /* clear contents of buffer_info */ + rx_buffer->page = NULL; } - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; } /** @@ -2199,16 +2185,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, return true; } -#define I40E_XDP_PASS 0 -#define I40E_XDP_CONSUMED BIT(0) -#define I40E_XDP_TX BIT(1) -#define I40E_XDP_REDIR BIT(2) - static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring); -static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, - struct i40e_ring *xdp_ring) +int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) { struct xdp_frame *xdpf = convert_to_xdp_frame(xdp); @@ -2287,7 +2267,13 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, #endif } -static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) +/** + * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register + * @xdp_ring: XDP Tx ring + * + * This function updates the XDP Tx ring tail register. + **/ +void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. @@ -2297,6 +2283,48 @@ static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) } /** + * i40e_update_rx_stats - Update Rx ring statistics + * @rx_ring: rx descriptor ring + * @total_rx_bytes: number of bytes received + * @total_rx_packets: number of packets received + * + * This function updates the Rx ring statistics. + **/ +void i40e_update_rx_stats(struct i40e_ring *rx_ring, + unsigned int total_rx_bytes, + unsigned int total_rx_packets) +{ + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; +} + +/** + * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map + * @rx_ring: Rx ring + * @xdp_res: Result of the receive batch + * + * This function bumps XDP Tx tail and/or flush redirect map, and + * should be called when a batch of packets has been processed in the + * napi loop. 
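i40e_update_rx_stats() and i40e_finalize_xdp_rx() above are exported so the AF_XDP zero-copy receive path added elsewhere in this series (i40e_xsk.c) can reuse them. A sketch, under that assumption and not the actual i40e_xsk.c code, of how a consumer such as i40e_clean_rx_irq_zc() is expected to wrap up a NAPI poll:

        /* after the per-packet loop has OR'd the XDP verdicts
         * (I40E_XDP_TX / I40E_XDP_REDIR / I40E_XDP_CONSUMED) into xdp_res
         * and summed total_rx_bytes / total_rx_packets:
         */
        i40e_finalize_xdp_rx(rx_ring, xdp_res);  /* one tail bump / map flush per poll */
        i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

        return failure ? budget : (int)total_rx_packets;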
+ **/ +void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) +{ + if (xdp_res & I40E_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_res & I40E_XDP_TX) { + struct i40e_ring *xdp_ring = + rx_ring->vsi->xdp_rings[rx_ring->queue_index]; + + i40e_xdp_ring_update_tail(xdp_ring); + } +} + +/** * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process @@ -2349,11 +2377,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - if (unlikely(i40e_rx_is_programming_status(qword))) { - i40e_clean_programming_status(rx_ring, rx_desc, qword); + rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc, + qword); + if (unlikely(rx_buffer)) { + i40e_reuse_rx_page(rx_ring, rx_buffer); cleaned_count++; continue; } + size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) @@ -2432,24 +2463,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) total_rx_packets++; } - if (xdp_xmit & I40E_XDP_REDIR) - xdp_do_flush_map(); - - if (xdp_xmit & I40E_XDP_TX) { - struct i40e_ring *xdp_ring = - rx_ring->vsi->xdp_rings[rx_ring->queue_index]; - - i40e_xdp_ring_update_tail(xdp_ring); - } - + i40e_finalize_xdp_rx(rx_ring, xdp_xmit); rx_ring->skb = skb; - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_packets += total_rx_packets; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : (int)total_rx_packets; @@ -2587,7 +2604,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - if (!i40e_clean_tx_irq(vsi, ring, budget)) { + bool wd = ring->xsk_umem ? + i40e_clean_xdp_tx_irq(vsi, ring, budget) : + i40e_clean_tx_irq(vsi, ring, budget); + + if (!wd) { clean_complete = false; continue; } @@ -2605,7 +2626,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { - int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); + int cleaned = ring->xsk_umem ? 
+ i40e_clean_rx_irq_zc(ring, budget_per_ring) : + i40e_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index bb04f6a731fe..100e92d2982f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -296,13 +296,17 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { dma_addr_t dma; - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; -#else - __u16 page_offset; -#endif - __u16 pagecnt_bias; + union { + struct { + struct page *page; + __u32 page_offset; + __u16 pagecnt_bias; + }; + struct { + void *addr; + u64 handle; + }; + }; }; struct i40e_queue_stats { @@ -414,6 +418,8 @@ struct i40e_ring { struct i40e_channel *ch; struct xdp_rxq_info xdp_rxq; + struct xdp_umem *xsk_umem; + struct zero_copy_allocator zca; /* ZC allocator anchor */ } ____cacheline_internodealigned_in_smp; static inline bool ring_uses_build_skb(struct i40e_ring *ring) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h new file mode 100644 index 000000000000..09809dffe399 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Intel Corporation. */ + +#ifndef I40E_TXRX_COMMON_ +#define I40E_TXRX_COMMON_ + +void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id); +int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring); +struct i40e_rx_buffer *i40e_clean_programming_status( + struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + u64 qw); +void i40e_process_skb_fields(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype); +void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag); +void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring); +void i40e_update_rx_stats(struct i40e_ring *rx_ring, + unsigned int total_rx_bytes, + unsigned int total_rx_packets); +void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res); +void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val); + +#define I40E_XDP_PASS 0 +#define I40E_XDP_CONSUMED BIT(0) +#define I40E_XDP_TX BIT(1) +#define I40E_XDP_REDIR BIT(2) + +/** + * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword + **/ +static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) +{ + return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +/** + * i40e_update_tx_stats - Update the egress statistics for the Tx ring + * @tx_ring: Tx ring to update + * @total_packet: total packets sent + * @total_bytes: total bytes sent + **/ +static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring, + unsigned int total_packets, + unsigned int total_bytes) +{ + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_packets += total_packets; +} + +#define WB_STRIDE 4 + +/** + * i40e_arm_wb - (Possibly) arms Tx 
write-back + * @tx_ring: Tx ring to update + * @vsi: the VSI + * @budget: the NAPI budget left + **/ +static inline void i40e_arm_wb(struct i40e_ring *tx_ring, + struct i40e_vsi *vsi, + int budget) +{ + if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + /* check to see if there are < 4 descriptors + * waiting to be written back, then kick the hardware to force + * them to be written back in case we stay in NAPI. + * In this mode on X722 we do not enable Interrupt. + */ + unsigned int j = i40e_get_tx_pending(tx_ring, false); + + if (budget && + ((j / WB_STRIDE) == 0) && j > 0 && + !test_bit(__I40E_VSI_DOWN, vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + } +} + +void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring); +void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring); +bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi); + +#endif /* I40E_TXRX_COMMON_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index c6d24eaede18..81b0e1f8d14b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1084,6 +1084,136 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) return -EIO; } +static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi); + +/** + * i40e_config_vf_promiscuous_mode + * @vf: pointer to the VF info + * @vsi_id: VSI id + * @allmulti: set MAC L2 layer multicast promiscuous enable/disable + * @alluni: set MAC L2 layer unicast promiscuous enable/disable + * + * Called from the VF to configure the promiscuous mode of + * VF vsis and from the VF reset path to reset promiscuous mode. + **/ +static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, + u16 vsi_id, + bool allmulti, + bool alluni) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_mac_filter *f; + i40e_status aq_ret = 0; + struct i40e_vsi *vsi; + int bkt; + + vsi = i40e_find_vsi_from_id(pf, vsi_id); + if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) + return I40E_ERR_PARAM; + + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + dev_err(&pf->pdev->dev, + "Unprivileged VF %d is attempting to configure promiscuous mode\n", + vf->vf_id); + /* Lie to the VF on purpose. 
*/ + return 0; + } + + if (vf->port_vlan_id) { + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid, + allmulti, + vf->port_vlan_id, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + return aq_ret; + } + + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, + alluni, + vf->port_vlan_id, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + return aq_ret; + } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { + hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { + if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) + continue; + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, + vsi->seid, + allmulti, + f->vlan, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n", + f->vlan, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, + vsi->seid, + alluni, + f->vlan, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", + f->vlan, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + } + return aq_ret; + } + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + return aq_ret; + } + + aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni, + NULL, true); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + + return aq_ret; +} + /** * i40e_trigger_vf_reset * @vf: pointer to the VF structure @@ -1145,6 +1275,9 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) struct i40e_hw *hw = &pf->hw; u32 reg; + /* disable promisc modes in case they were enabled */ + i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false); + /* free VF resources to begin resetting the VSI state */ i40e_free_vf_res(vf); @@ -1840,143 +1973,55 @@ static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) * i40e_vc_config_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to configure the promiscuous mode of * VF vsis **/ -static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, - u8 *msg, u16 msglen) +static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)msg; struct i40e_pf *pf = vf->pf; - struct i40e_hw *hw = &pf->hw; - struct i40e_mac_filter *f; i40e_status aq_ret = 0; bool allmulti = false; - struct i40e_vsi *vsi; bool alluni = false; - int aq_err = 0; - int bkt; - vsi = i40e_find_vsi_from_id(pf, info->vsi_id); - if (!test_bit(I40E_VF_STATE_ACTIVE, 
&vf->vf_states) || - !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || - !vsi) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { - dev_err(&pf->pdev->dev, - "Unprivileged VF %d is attempting to configure promiscuous mode\n", - vf->vf_id); - /* Lie to the VF on purpose. */ - aq_ret = 0; - goto error_param; - } + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) + return I40E_ERR_PARAM; + /* Multicast promiscuous handling*/ if (info->flags & FLAG_VF_MULTICAST_PROMISC) allmulti = true; - if (vf->port_vlan_id) { - aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid, - allmulti, - vf->port_vlan_id, - NULL); - } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) - continue; - aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, - vsi->seid, - allmulti, - f->vlan, - NULL); - aq_err = pf->hw.aq.asq_last_status; - if (aq_ret) { - dev_err(&pf->pdev->dev, - "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n", - f->vlan, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - break; - } - } - } else { - aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, - allmulti, NULL); - aq_err = pf->hw.aq.asq_last_status; - if (aq_ret) { - dev_err(&pf->pdev->dev, - "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", - vf->vf_id, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - goto error_param; - } - } - + if (info->flags & FLAG_VF_UNICAST_PROMISC) + alluni = true; + aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, + alluni); if (!aq_ret) { - dev_info(&pf->pdev->dev, - "VF %d successfully set multicast promiscuous mode\n", - vf->vf_id); - if (allmulti) + if (allmulti) { + dev_info(&pf->pdev->dev, + "VF %d successfully set multicast promiscuous mode\n", + vf->vf_id); set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - else + } else { + dev_info(&pf->pdev->dev, + "VF %d successfully unset multicast promiscuous mode\n", + vf->vf_id); clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - } - - if (info->flags & FLAG_VF_UNICAST_PROMISC) - alluni = true; - if (vf->port_vlan_id) { - aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, - alluni, - vf->port_vlan_id, - NULL); - } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) - continue; - aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, - vsi->seid, - alluni, - f->vlan, - NULL); - aq_err = pf->hw.aq.asq_last_status; - if (aq_ret) - dev_err(&pf->pdev->dev, - "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", - f->vlan, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - } - } else { - aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, - alluni, NULL, - true); - aq_err = pf->hw.aq.asq_last_status; - if (aq_ret) { - dev_err(&pf->pdev->dev, - "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n", - vf->vf_id, info->flags, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - goto error_param; } - } - - if (!aq_ret) { - dev_info(&pf->pdev->dev, - "VF %d successfully set unicast promiscuous mode\n", - vf->vf_id); - if (alluni) + if (alluni) { + dev_info(&pf->pdev->dev, + "VF %d successfully set unicast promiscuous mode\n", + vf->vf_id); set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); - else + } else { + 
dev_info(&pf->pdev->dev, + "VF %d successfully unset unicast promiscuous mode\n", + vf->vf_id); clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); + } } -error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, @@ -1987,12 +2032,11 @@ error_param: * i40e_vc_config_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to configure the rx/tx * queues **/ -static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; @@ -2105,12 +2149,11 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, * i40e_vc_config_irq_map_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to configure the irq to * queue map **/ -static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_irq_map_info *irqmap_info = (struct virtchnl_irq_map_info *)msg; @@ -2202,11 +2245,10 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, * i40e_vc_enable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to enable all or specific queue(s) **/ -static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; @@ -2261,12 +2303,11 @@ error_param: * i40e_vc_disable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to disable all or specific * queue(s) **/ -static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; @@ -2309,14 +2350,13 @@ error_param: * i40e_vc_request_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * VFs get a default number of queues but can use this message to request a * different number. If the request is successful, PF will reset the VF and * return 0. If unsuccessful, PF will send message informing VF of number of * available queues and return result of sending VF a message. 
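For reference, the request the PF is acting on here is a single-field virtchnl message. Below is a minimal, hedged sketch of the VF-side call, assuming the virtchnl_vf_res_request layout from include/linux/avf/virtchnl.h (both the opcode and the cast to this struct appear in the handler in this patch); the send routine is taken as a callback so no VF-driver internals have to be assumed.

#include <linux/avf/virtchnl.h>

/* Sketch of the VF side: ask the PF for a different number of queue pairs.
 * On success the PF resets the VF; on failure it replies with how many
 * queue pairs are actually available.
 */
static int vf_request_queues(u16 num_queue_pairs,
			     int (*send_to_pf)(enum virtchnl_ops op,
					       u8 *msg, u16 len))
{
	struct virtchnl_vf_res_request vfres = {
		.num_queue_pairs = num_queue_pairs,
	};

	return send_to_pf(VIRTCHNL_OP_REQUEST_QUEUES,
			  (u8 *)&vfres, sizeof(vfres));
}
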
**/ -static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen) +static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; @@ -2360,11 +2400,10 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen) * i40e_vc_get_stats_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to get vsi stats **/ -static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; @@ -2458,7 +2497,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !is_multicast_ether_addr(addr) && vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, - "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); return -EPERM; } } @@ -2470,11 +2509,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, * i40e_vc_add_mac_addr_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * add guest mac address filter **/ -static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_ether_addr_list *al = (struct virtchnl_ether_addr_list *)msg; @@ -2541,11 +2579,10 @@ error_param: * i40e_vc_del_mac_addr_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * remove guest mac address filter **/ -static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_ether_addr_list *al = (struct virtchnl_ether_addr_list *)msg; @@ -2569,6 +2606,16 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; } + + if (vf->pf_set_mac && + ether_addr_equal(al->list[i].addr, + vf->default_lan_addr.addr)) { + dev_err(&pf->pdev->dev, + "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", + vf->default_lan_addr.addr, vf->vf_id); + ret = I40E_ERR_PARAM; + goto error_param; + } } vsi = pf->vsi[vf->lan_vsi_idx]; @@ -2601,11 +2648,10 @@ error_param: * i40e_vc_add_vlan_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * program guest vlan id **/ -static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; @@ -2674,11 +2720,10 @@ error_param: * i40e_vc_remove_vlan_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * remove programmed guest vlan id **/ -static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; @@ -2761,13 +2806,11 @@ error_param: * i40e_vc_iwarp_qvmap_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * @config: config qvmap or 
release it * * called from the VF for the iwarp msgs **/ -static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, - bool config) +static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) { struct virtchnl_iwarp_qvlist_info *qvlist_info = (struct virtchnl_iwarp_qvlist_info *)msg; @@ -2798,11 +2841,10 @@ error_param: * i40e_vc_config_rss_key * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Configure the VF's RSS key **/ -static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; @@ -2830,11 +2872,10 @@ err: * i40e_vc_config_rss_lut * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Configure the VF's RSS LUT **/ -static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; @@ -2862,11 +2903,10 @@ err: * i40e_vc_get_rss_hena * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Return the RSS HENA bits allowed by the hardware **/ -static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_hena *vrh = NULL; struct i40e_pf *pf = vf->pf; @@ -2898,11 +2938,10 @@ err: * i40e_vc_set_rss_hena * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Set the RSS HENA bits for the VF **/ -static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; @@ -2927,12 +2966,10 @@ err: * i40e_vc_enable_vlan_stripping * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Enable vlan header stripping for the VF **/ -static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg, - u16 msglen) +static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) { struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; i40e_status aq_ret = 0; @@ -2954,12 +2991,10 @@ err: * i40e_vc_disable_vlan_stripping * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * Disable vlan header stripping for the VF **/ -static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg, - u16 msglen) +static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) { struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; i40e_status aq_ret = 0; @@ -3659,65 +3694,65 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, ret = 0; break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); + ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: - ret = i40e_vc_config_queues_msg(vf, msg, msglen); + ret = i40e_vc_config_queues_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: - ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); + ret = i40e_vc_config_irq_map_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_QUEUES: - ret = i40e_vc_enable_queues_msg(vf, msg, msglen); + ret = i40e_vc_enable_queues_msg(vf, msg); i40e_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_DISABLE_QUEUES: - ret = i40e_vc_disable_queues_msg(vf, msg, msglen); 
+ ret = i40e_vc_disable_queues_msg(vf, msg); break; case VIRTCHNL_OP_ADD_ETH_ADDR: - ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); + ret = i40e_vc_add_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_ETH_ADDR: - ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); + ret = i40e_vc_del_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_ADD_VLAN: - ret = i40e_vc_add_vlan_msg(vf, msg, msglen); + ret = i40e_vc_add_vlan_msg(vf, msg); break; case VIRTCHNL_OP_DEL_VLAN: - ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); + ret = i40e_vc_remove_vlan_msg(vf, msg); break; case VIRTCHNL_OP_GET_STATS: - ret = i40e_vc_get_stats_msg(vf, msg, msglen); + ret = i40e_vc_get_stats_msg(vf, msg); break; case VIRTCHNL_OP_IWARP: ret = i40e_vc_iwarp_msg(vf, msg, msglen); break; case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true); + ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); break; case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); + ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: - ret = i40e_vc_config_rss_key(vf, msg, msglen); + ret = i40e_vc_config_rss_key(vf, msg); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: - ret = i40e_vc_config_rss_lut(vf, msg, msglen); + ret = i40e_vc_config_rss_lut(vf, msg); break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: - ret = i40e_vc_get_rss_hena(vf, msg, msglen); + ret = i40e_vc_get_rss_hena(vf, msg); break; case VIRTCHNL_OP_SET_RSS_HENA: - ret = i40e_vc_set_rss_hena(vf, msg, msglen); + ret = i40e_vc_set_rss_hena(vf, msg); break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: - ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen); + ret = i40e_vc_enable_vlan_stripping(vf, msg); break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: - ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen); + ret = i40e_vc_disable_vlan_stripping(vf, msg); break; case VIRTCHNL_OP_REQUEST_QUEUES: - ret = i40e_vc_request_queues_msg(vf, msg, msglen); + ret = i40e_vc_request_queues_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_CHANNELS: ret = i40e_vc_add_qch_msg(vf, msg); @@ -3786,6 +3821,35 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) } /** + * i40e_validate_vf + * @pf: the physical function + * @vf_id: VF identifier + * + * Check that the VF is enabled and the VSI exists. + * + * Returns 0 on success, negative on failure + **/ +static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) +{ + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret = 0; + + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, + "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto err_out; + } + vf = &pf->vf[vf_id]; + vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); + if (!vsi) + ret = -EINVAL; +err_out: + return ret; +} + +/** * i40e_ndo_set_vf_mac * @netdev: network interface device structure * @vf_id: VF identifier @@ -3806,14 +3870,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) u8 i; /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, - "Invalid VF Identifier %d\n", vf_id); - ret = -EINVAL; + ret = i40e_validate_vf(pf, vf_id); + if (ret) goto error_param; - } - vf = &(pf->vf[vf_id]); + vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; /* When the VF is resetting wait until it is done. 
@@ -3873,9 +3934,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) mac, vf_id); } - /* Force the VF driver stop so it has to reload with new MAC address */ + /* Force the VF interface down so it has to bring up with new MAC + * address + */ i40e_vc_disable_vf(vf); - dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); + dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: return ret; @@ -3930,11 +3993,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, int ret = 0; /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); - ret = -EINVAL; + ret = i40e_validate_vf(pf, vf_id); + if (ret) goto error_pvid; - } if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); @@ -3948,7 +4009,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, goto error_pvid; } - vf = &(pf->vf[vf_id]); + vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", @@ -4068,11 +4129,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int ret = 0; /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); - ret = -EINVAL; + ret = i40e_validate_vf(pf, vf_id); + if (ret) goto error; - } if (min_tx_rate) { dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", @@ -4080,7 +4139,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, return -EINVAL; } - vf = &(pf->vf[vf_id]); + vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", @@ -4116,13 +4175,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, int ret = 0; /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); - ret = -EINVAL; + ret = i40e_validate_vf(pf, vf_id); + if (ret) goto error_param; - } - vf = &(pf->vf[vf_id]); + vf = &pf->vf[vf_id]; /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { @@ -4199,7 +4256,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) vf->link_forced = true; vf->link_up = true; pfe.event_data.link_event.link_status = true; - pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; + pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; break; case IFLA_VF_LINK_STATE_DISABLE: vf->link_forced = true; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c new file mode 100644 index 000000000000..add1e457886d --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -0,0 +1,967 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Intel Corporation. 
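The new i40e_xsk.c that starts here implements the driver side of AF_XDP zero-copy. For orientation, the sketch below shows the user-space side that exercises it: registering a UMEM, sizing the rings, binding the socket to one queue with XDP_ZEROCOPY, and waking Tx with an empty sendto(). It is heavily abbreviated (no error handling, no ring mmap or producer code; see samples/bpf/xdpsock_user.c for the full flow), the chunk and ring sizes are illustrative, and the bind is expected to reach i40e_xsk_umem_setup() through the driver's ndo_bpf hook.

#include <linux/if_xdp.h>
#include <net/if.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_XDP			/* older libc headers may lack these */
#define AF_XDP 44
#endif
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

/* Abbreviated AF_XDP zero-copy setup for one queue of an i40e netdev. */
static int xsk_bind_zerocopy(const char *ifname, __u32 queue_id,
			     void *umem_area, __u64 umem_len)
{
	struct xdp_umem_reg mr = {
		.addr = (__u64)(unsigned long)umem_area,
		.len = umem_len,
		.chunk_size = 2048,		/* illustrative */
		.headroom = 0,
	};
	struct sockaddr_xdp sxdp = {
		.sxdp_family = AF_XDP,
		.sxdp_flags = XDP_ZEROCOPY,	/* XDP_COPY is the fallback */
		.sxdp_ifindex = if_nametoindex(ifname),
		.sxdp_queue_id = queue_id,
	};
	int fd, ndescs = 2048;

	fd = socket(AF_XDP, SOCK_RAW, 0);
	if (fd < 0)
		return -1;

	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ndescs, sizeof(ndescs));
	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ndescs, sizeof(ndescs));
	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ndescs, sizeof(ndescs));
	setsockopt(fd, SOL_XDP, XDP_TX_RING, &ndescs, sizeof(ndescs));

	/* Binding with XDP_ZEROCOPY attaches the UMEM to the queue; in this
	 * driver that request is handled by i40e_xsk_umem_setup().
	 */
	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

/* After producing Tx descriptors, wake the kernel Tx path; with zero-copy
 * this ends up in the driver's ndo_xsk_async_xmit (i40e_xsk_async_xmit).
 */
static int xsk_kick_tx(int fd)
{
	return sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}
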
*/ + +#include <linux/bpf_trace.h> +#include <net/xdp_sock.h> +#include <net/xdp.h> + +#include "i40e.h" +#include "i40e_txrx_common.h" +#include "i40e_xsk.h" + +/** + * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs + * @vsi: Current VSI + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi) +{ + if (vsi->xsk_umems) + return 0; + + vsi->num_xsk_umems_used = 0; + vsi->num_xsk_umems = vsi->alloc_queue_pairs; + vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems), + GFP_KERNEL); + if (!vsi->xsk_umems) { + vsi->num_xsk_umems = 0; + return -ENOMEM; + } + + return 0; +} + +/** + * i40e_add_xsk_umem - Store an UMEM for a certain ring/qid + * @vsi: Current VSI + * @umem: UMEM to store + * @qid: Ring/qid to associate with the UMEM + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem, + u16 qid) +{ + int err; + + err = i40e_alloc_xsk_umems(vsi); + if (err) + return err; + + vsi->xsk_umems[qid] = umem; + vsi->num_xsk_umems_used++; + + return 0; +} + +/** + * i40e_remove_xsk_umem - Remove an UMEM for a certain ring/qid + * @vsi: Current VSI + * @qid: Ring/qid associated with the UMEM + **/ +static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid) +{ + vsi->xsk_umems[qid] = NULL; + vsi->num_xsk_umems_used--; + + if (vsi->num_xsk_umems == 0) { + kfree(vsi->xsk_umems); + vsi->xsk_umems = NULL; + vsi->num_xsk_umems = 0; + } +} + +/** + * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev + * @vsi: Current VSI + * @umem: UMEM to DMA map + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem) +{ + struct i40e_pf *pf = vsi->back; + struct device *dev; + unsigned int i, j; + dma_addr_t dma; + + dev = &pf->pdev->dev; + for (i = 0; i < umem->npgs; i++) { + dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); + if (dma_mapping_error(dev, dma)) + goto out_unmap; + + umem->pages[i].dma = dma; + } + + return 0; + +out_unmap: + for (j = 0; j < i; j++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); + umem->pages[i].dma = 0; + } + + return -1; +} + +/** + * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev + * @vsi: Current VSI + * @umem: UMEM to DMA map + **/ +static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem) +{ + struct i40e_pf *pf = vsi->back; + struct device *dev; + unsigned int i; + + dev = &pf->pdev->dev; + + for (i = 0; i < umem->npgs; i++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); + + umem->pages[i].dma = 0; + } +} + +/** + * i40e_xsk_umem_enable - Enable/associate an UMEM to a certain ring/qid + * @vsi: Current VSI + * @umem: UMEM + * @qid: Rx ring to associate UMEM to + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, + u16 qid) +{ + struct xdp_umem_fq_reuse *reuseq; + bool if_running; + int err; + + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + + if (qid >= vsi->num_queue_pairs) + return -EINVAL; + + if (vsi->xsk_umems) { + if (qid >= vsi->num_xsk_umems) + return -EINVAL; + if (vsi->xsk_umems[qid]) + return -EBUSY; + } + + reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count); + if (!reuseq) + return -ENOMEM; + + xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); + + err = 
i40e_xsk_umem_dma_map(vsi, umem); + if (err) + return err; + + if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); + + if (if_running) { + err = i40e_queue_pair_disable(vsi, qid); + if (err) + return err; + } + + err = i40e_add_xsk_umem(vsi, umem, qid); + if (err) + return err; + + if (if_running) { + err = i40e_queue_pair_enable(vsi, qid); + if (err) + return err; + } + + return 0; +} + +/** + * i40e_xsk_umem_disable - Diassociate an UMEM from a certain ring/qid + * @vsi: Current VSI + * @qid: Rx ring to associate UMEM to + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) +{ + bool if_running; + int err; + + if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems || + !vsi->xsk_umems[qid]) + return -EINVAL; + + if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); + + if (if_running) { + err = i40e_queue_pair_disable(vsi, qid); + if (err) + return err; + } + + i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]); + i40e_remove_xsk_umem(vsi, qid); + + if (if_running) { + err = i40e_queue_pair_enable(vsi, qid); + if (err) + return err; + } + + return 0; +} + +/** + * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM + * @vsi: Current VSI + * @umem: UMEM associated to the ring, if any + * @qid: Rx ring to associate UMEM to + * + * This function will store, if any, the UMEM associated to certain ring. + * + * Returns 0 on success, <0 on failure + **/ +int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem, + u16 qid) +{ + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + + if (qid >= vsi->num_queue_pairs) + return -EINVAL; + + if (vsi->xsk_umems) { + if (qid >= vsi->num_xsk_umems) + return -EINVAL; + *umem = vsi->xsk_umems[qid]; + return 0; + } + + *umem = NULL; + return 0; +} + +/** + * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM + * @vsi: Current VSI + * @umem: UMEM to enable/associate to a ring, or NULL to disable + * @qid: Rx ring to (dis)associate UMEM (from)to + * + * This function enables or disables an UMEM to a certain ring. + * + * Returns 0 on success, <0 on failure + **/ +int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, + u16 qid) +{ + return umem ? i40e_xsk_umem_enable(vsi, umem, qid) : + i40e_xsk_umem_disable(vsi, qid); +} + +/** + * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff + * @rx_ring: Rx ring + * @xdp: xdp_buff used as input to the XDP program + * + * This function enables or disables an UMEM to a certain ring. + * + * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR} + **/ +static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) +{ + int err, result = I40E_XDP_PASS; + struct i40e_ring *xdp_ring; + struct bpf_prog *xdp_prog; + u32 act; + + rcu_read_lock(); + /* NB! xdp_prog will always be !NULL, due to the fact that + * this path is enabled by setting an XDP program. + */ + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); + xdp->handle += xdp->data - xdp->data_hard_start; + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; + result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? 
I40E_XDP_REDIR : I40E_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + case XDP_DROP: + result = I40E_XDP_CONSUMED; + break; + } + rcu_read_unlock(); + return result; +} + +/** + * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer + * @rx_ring: Rx ring + * @bi: Rx buffer to populate + * + * This function allocates an Rx buffer. The buffer can come from fill + * queue, or via the recycle queue (next_to_alloc). + * + * Returns true for a successful allocation, false otherwise + **/ +static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + void *addr = bi->addr; + u64 handle, hr; + + if (addr) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } + + if (!xsk_umem_peek_addr(umem, &handle)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr(umem); + return true; +} + +/** + * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer + * @rx_ring: Rx ring + * @bi: Rx buffer to populate + * + * This function allocates an Rx buffer. The buffer can come from fill + * queue, or via the reuse queue. + * + * Returns true for a successful allocation, false otherwise + **/ +static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + u64 handle, hr; + + if (!xsk_umem_peek_addr_rq(umem, &handle)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + handle &= rx_ring->xsk_umem->chunk_mask; + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr_rq(umem); + return true; +} + +static __always_inline bool +__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count, + bool alloc(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi)) +{ + u16 ntu = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + bool ok = true; + + rx_desc = I40E_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_bi[ntu]; + do { + if (!alloc(rx_ring, bi)) { + ok = false; + goto no_buffers; + } + + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, + rx_ring->rx_buf_len, + DMA_BIDIRECTIONAL); + + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + + rx_desc++; + bi++; + ntu++; + + if (unlikely(ntu == rx_ring->count)) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_bi; + ntu = 0; + } + + rx_desc->wb.qword1.status_error_len = 0; + count--; + } while (count); + +no_buffers: + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); + + return ok; +} + +/** + * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers + * @rx_ring: Rx ring + * @count: The number of buffers to allocate + * + * This function allocates a number of Rx buffers from the reuse queue + * or fill ring and places them on the Rx ring. 
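Stepping back to i40e_run_xdp_zc() above: the XDP_REDIRECT verdict it handles is typically produced by a small XDP program that steers packets into an XSKMAP slot keyed by Rx queue. The following is a hedged sketch in the style of samples/bpf/xdpsock_kern.c from this kernel era; the header name and map layout are assumed from that sample rather than from this patch.

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") xsks_map = {
	.type = BPF_MAP_TYPE_XSKMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 4,
};

SEC("xdp_sock")
int xdp_sock_prog(struct xdp_md *ctx)
{
	/* index by Rx queue; user space inserts the XSK fd at this slot */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
}

char _license[] SEC("license") = "GPL";
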
+ * + * Returns true for a successful allocation, false otherwise + **/ +bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) +{ + return __i40e_alloc_rx_buffers_zc(rx_ring, count, + i40e_alloc_buffer_slow_zc); +} + +/** + * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers + * @rx_ring: Rx ring + * @count: The number of buffers to allocate + * + * This function allocates a number of Rx buffers from the fill ring + * or the internal recycle mechanism and places them on the Rx ring. + * + * Returns true for a successful allocation, false otherwise + **/ +static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count) +{ + return __i40e_alloc_rx_buffers_zc(rx_ring, count, + i40e_alloc_buffer_zc); +} + +/** + * i40e_get_rx_buffer_zc - Return the current Rx buffer + * @rx_ring: Rx ring + * @size: The size of the rx buffer (read from descriptor) + * + * This function returns the current, received Rx buffer, and also + * does DMA synchronization. the Rx ring. + * + * Returns the received Rx buffer + **/ +static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring, + const unsigned int size) +{ + struct i40e_rx_buffer *bi; + + bi = &rx_ring->rx_bi[rx_ring->next_to_clean]; + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + bi->dma, 0, + size, + DMA_BIDIRECTIONAL); + + return bi; +} + +/** + * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer + * @rx_ring: Rx ring + * @old_bi: The Rx buffer to recycle + * + * This function recycles a finished Rx buffer, and places it on the + * recycle queue (next_to_alloc). + **/ +static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_bi) +{ + struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc]; + unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; + u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + u16 nta = rx_ring->next_to_alloc; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_bi->dma = old_bi->dma & mask; + new_bi->dma += hr; + + new_bi->addr = (void *)((unsigned long)old_bi->addr & mask); + new_bi->addr += hr; + + new_bi->handle = old_bi->handle & mask; + new_bi->handle += rx_ring->xsk_umem->headroom; + + old_bi->addr = NULL; +} + +/** + * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations + * @alloc: Zero-copy allocator + * @handle: Buffer handle + **/ +void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) +{ + struct i40e_rx_buffer *bi; + struct i40e_ring *rx_ring; + u64 hr, mask; + u16 nta; + + rx_ring = container_of(alloc, struct i40e_ring, zca); + hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + mask = rx_ring->xsk_umem->chunk_mask; + + nta = rx_ring->next_to_alloc; + bi = &rx_ring->rx_bi[nta]; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + handle &= mask; + + bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); + bi->addr += hr; + + bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; +} + +/** + * i40e_construct_skb_zc - Create skbufff from zero-copy Rx buffer + * @rx_ring: Rx ring + * @bi: Rx buffer + * @xdp: xdp_buff + * + * This functions allocates a new skb from a zero-copy Rx buffer. + * + * Returns the skb, or NULL on failure. 
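One step back: i40e_zca_free() above only comes into play after it has been registered as the MEM_TYPE_ZERO_COPY allocator for the ring's xdp_rxq_info, and that registration happens in the Rx ring configuration path elsewhere in this series rather than in this file. A minimal sketch of what the wiring looks like, assuming the generic xdp_rxq_info_reg_mem_model() API; the helper name rx_ring_use_zca is invented for illustration.

#include <net/xdp.h>
#include "i40e.h"
#include "i40e_xsk.h"

/* Sketch only: register the ring's zero-copy allocator so that redirected
 * frames are returned through i40e_zca_free() instead of the page path.
 */
static int rx_ring_use_zca(struct i40e_ring *rx_ring)
{
	rx_ring->zca.free = i40e_zca_free;

	return xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
					  MEM_TYPE_ZERO_COPY,
					  &rx_ring->zca);
}
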
+ **/ +static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + i40e_reuse_rx_buffer_zc(rx_ring, bi); + return skb; +} + +/** + * i40e_inc_ntc: Advance the next_to_clean index + * @rx_ring: Rx ring + **/ +static void i40e_inc_ntc(struct i40e_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(I40E_RX_DESC(rx_ring, ntc)); +} + +/** + * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring + * @rx_ring: Rx ring + * @budget: NAPI budget + * + * Returns amount of work completed + **/ +int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + unsigned int xdp_res, xdp_xmit = 0; + bool failure = false; + struct sk_buff *skb; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; + + while (likely(total_rx_packets < (unsigned int)budget)) { + struct i40e_rx_buffer *bi; + union i40e_rx_desc *rx_desc; + unsigned int size; + u16 vlan_tag; + u8 rx_ptype; + u64 qword; + + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + failure = failure || + !i40e_alloc_rx_buffers_fast_zc(rx_ring, + cleaned_count); + cleaned_count = 0; + } + + rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. + */ + dma_rmb(); + + bi = i40e_clean_programming_status(rx_ring, rx_desc, + qword); + if (unlikely(bi)) { + i40e_reuse_rx_buffer_zc(rx_ring, bi); + cleaned_count++; + continue; + } + + size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + if (!size) + break; + + bi = i40e_get_rx_buffer_zc(rx_ring, size); + xdp.data = bi->addr; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + xdp.handle = bi->handle; + + xdp_res = i40e_run_xdp_zc(rx_ring, &xdp); + if (xdp_res) { + if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { + xdp_xmit |= xdp_res; + bi->addr = NULL; + } else { + i40e_reuse_rx_buffer_zc(rx_ring, bi); + } + + total_rx_bytes += size; + total_rx_packets++; + + cleaned_count++; + i40e_inc_ntc(rx_ring); + continue; + } + + /* XDP_PASS path */ + + /* NB! We are not checking for errors using + * i40e_test_staterr with + * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that + * SBP is *not* set in PRT_SBPVSI (default not set). 
+ */ + skb = i40e_construct_skb_zc(rx_ring, bi, &xdp); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + break; + } + + cleaned_count++; + i40e_inc_ntc(rx_ring); + + if (eth_skb_pad(skb)) + continue; + + total_rx_bytes += skb->len; + total_rx_packets++; + + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + + vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); + } + + i40e_finalize_xdp_rx(rx_ring, xdp_xmit); + i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); + return failure ? budget : (int)total_rx_packets; +} + +/** + * i40e_xmit_zc - Performs zero-copy Tx AF_XDP + * @xdp_ring: XDP Tx ring + * @budget: NAPI budget + * + * Returns true if the work is finished. + **/ +static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) +{ + struct i40e_tx_desc *tx_desc = NULL; + struct i40e_tx_buffer *tx_bi; + bool work_done = true; + dma_addr_t dma; + u32 len; + + while (budget-- > 0) { + if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { + xdp_ring->tx_stats.tx_busy++; + work_done = false; + break; + } + + if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len)) + break; + + dma_sync_single_for_device(xdp_ring->dev, dma, len, + DMA_BIDIRECTIONAL); + + tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use]; + tx_bi->bytecount = len; + + tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use); + tx_desc->buffer_addr = cpu_to_le64(dma); + tx_desc->cmd_type_offset_bsz = + build_ctob(I40E_TX_DESC_CMD_ICRC + | I40E_TX_DESC_CMD_EOP, + 0, len, 0); + + xdp_ring->next_to_use++; + if (xdp_ring->next_to_use == xdp_ring->count) + xdp_ring->next_to_use = 0; + } + + if (tx_desc) { + /* Request an interrupt for the last frame and bump tail ptr. */ + tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << + I40E_TXD_QW1_CMD_SHIFT); + i40e_xdp_ring_update_tail(xdp_ring); + + xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + } + + return !!budget && work_done; +} + +/** + * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry + * @tx_ring: XDP Tx ring + * @tx_bi: Tx buffer info to clean + **/ +static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring, + struct i40e_tx_buffer *tx_bi) +{ + xdp_return_frame(tx_bi->xdpf); + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_bi, dma), + dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_bi, len, 0); +} + +/** + * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries + * @tx_ring: XDP Tx ring + * @tx_bi: Tx buffer info to clean + * + * Returns true if cleanup/tranmission is done. 
+ **/ +bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget) +{ + unsigned int ntc, total_bytes = 0, budget = vsi->work_limit; + u32 i, completed_frames, frames_ready, xsk_frames = 0; + struct xdp_umem *umem = tx_ring->xsk_umem; + u32 head_idx = i40e_get_head(tx_ring); + bool work_done = true, xmit_done; + struct i40e_tx_buffer *tx_bi; + + if (head_idx < tx_ring->next_to_clean) + head_idx += tx_ring->count; + frames_ready = head_idx - tx_ring->next_to_clean; + + if (frames_ready == 0) { + goto out_xmit; + } else if (frames_ready > budget) { + completed_frames = budget; + work_done = false; + } else { + completed_frames = frames_ready; + } + + ntc = tx_ring->next_to_clean; + + for (i = 0; i < completed_frames; i++) { + tx_bi = &tx_ring->tx_bi[ntc]; + + if (tx_bi->xdpf) + i40e_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + total_bytes += tx_bi->bytecount; + + if (++ntc >= tx_ring->count) + ntc = 0; + } + + tx_ring->next_to_clean += completed_frames; + if (unlikely(tx_ring->next_to_clean >= tx_ring->count)) + tx_ring->next_to_clean -= tx_ring->count; + + if (xsk_frames) + xsk_umem_complete_tx(umem, xsk_frames); + + i40e_arm_wb(tx_ring, vsi, budget); + i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); + +out_xmit: + xmit_done = i40e_xmit_zc(tx_ring, budget); + + return work_done && xmit_done; +} + +/** + * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit + * @dev: the netdevice + * @queue_id: queue id to wake up + * + * Returns <0 for errors, 0 otherwise. + **/ +int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_ring *ring; + + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return -ENETDOWN; + + if (!i40e_enabled_xdp_vsi(vsi)) + return -ENXIO; + + if (queue_id >= vsi->num_queue_pairs) + return -ENXIO; + + if (!vsi->xdp_rings[queue_id]->xsk_umem) + return -ENXIO; + + ring = vsi->xdp_rings[queue_id]; + + /* The idea here is that if NAPI is running, mark a miss, so + * it will run again. If not, trigger an interrupt and + * schedule the NAPI from interrupt context. If NAPI would be + * scheduled here, the interrupt affinity would not be + * honored. 
+ */ + if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) + i40e_force_wb(vsi, ring->q_vector); + + return 0; +} + +void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring) +{ + u16 i; + + for (i = 0; i < rx_ring->count; i++) { + struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + + if (!rx_bi->addr) + continue; + + xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle); + rx_bi->addr = NULL; + } +} + +/** + * i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown + * @xdp_ring: XDP Tx ring + **/ +void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring) +{ + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; + struct xdp_umem *umem = tx_ring->xsk_umem; + struct i40e_tx_buffer *tx_bi; + u32 xsk_frames = 0; + + while (ntc != ntu) { + tx_bi = &tx_ring->tx_bi[ntc]; + + if (tx_bi->xdpf) + i40e_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + + ntc++; + if (ntc >= tx_ring->count) + ntc = 0; + } + + if (xsk_frames) + xsk_umem_complete_tx(umem, xsk_frames); +} + +/** + * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached + * @vsi: vsi + * + * Returns true if any of the Rx rings has an AF_XDP UMEM attached + **/ +bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi) +{ + int i; + + if (!vsi->xsk_umems) + return false; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + if (vsi->xsk_umems[i]) + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h new file mode 100644 index 000000000000..9038c5d5cf08 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Intel Corporation. */ + +#ifndef _I40E_XSK_H_ +#define _I40E_XSK_H_ + +struct i40e_vsi; +struct xdp_umem; +struct zero_copy_allocator; + +int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair); +int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair); +int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem, + u16 qid); +int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, + u16 qid); +void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); +bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count); +int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); + +bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget); +int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id); + +#endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile deleted file mode 100644 index 3c5c6e962280..000000000000 --- a/drivers/net/ethernet/intel/i40evf/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# Copyright(c) 2013 - 2018 Intel Corporation. - -# -## Makefile for the Intel(R) 40GbE VF driver -# -# - -ccflags-y += -I$(src) -subdir-ccflags-y += -I$(src) - -obj-$(CONFIG_I40EVF) += i40evf.o - -i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \ - i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o - diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h deleted file mode 100644 index 5fd8529465d4..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ /dev/null @@ -1,2717 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_ADMINQ_CMD_H_ -#define _I40E_ADMINQ_CMD_H_ - -/* This header file defines the i40e Admin Queue commands and is shared between - * i40e Firmware and Software. - * - * This file needs to comply with the Linux Kernel coding style. - */ - -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0005 -#define I40E_FW_API_VERSION_MINOR_X710 0x0007 - -#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \ - I40E_FW_API_VERSION_MINOR_X710 : \ - I40E_FW_API_VERSION_MINOR_X722) - -/* API version 1.7 implements additional link and PHY-specific APIs */ -#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 - -struct i40e_aq_desc { - __le16 flags; - __le16 opcode; - __le16 datalen; - __le16 retval; - __le32 cookie_high; - __le32 cookie_low; - union { - struct { - __le32 param0; - __le32 param1; - __le32 param2; - __le32 param3; - } internal; - struct { - __le32 param0; - __le32 param1; - __le32 addr_high; - __le32 addr_low; - } external; - u8 raw[16]; - } params; -}; - -/* Flags sub-structure - * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | - * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | - */ - -/* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ - -/* error codes */ -enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ -}; - -/* Admin Queue 
command opcodes */ -enum i40e_admin_queue_opc { - /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, - - /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, - - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, - - /* Proxy commands */ - i40e_aqc_opc_set_proxy_config = 0x0104, - i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, - - /* LAA */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, - - /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, - - /* WoL commands */ - i40e_aqc_opc_set_wol_filter = 0x0120, - i40e_aqc_opc_get_wake_reason = 0x0121, - - /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - i40e_aqc_opc_set_switch_config = 0x0205, - i40e_aqc_opc_rx_ctl_reg_read = 0x0206, - i40e_aqc_opc_rx_ctl_reg_write = 0x0207, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - i40e_aqc_opc_clear_wol_switch_filters = 0x025E, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, - - /* Dynamic Device Personalization */ - i40e_aqc_opc_write_personalization_profile = 0x0270, - i40e_aqc_opc_get_personalization_profile_list = 0x0271, - - /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, - i40e_aqc_opc_set_dcb_parameters = 0x0303, - - /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - 
i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - - /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, - i40e_aqc_opc_run_phy_activity = 0x0626, - i40e_aqc_opc_set_phy_register = 0x0628, - i40e_aqc_opc_get_phy_register = 0x0629, - - /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, - i40e_aqc_opc_oem_post_update = 0x0720, - i40e_aqc_opc_thermal_sensor = 0x0721, - - /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, - - /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, - - /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, - - /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel = 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_set_rss_key = 0x0B02, - i40e_aqc_opc_set_rss_lut = 0x0B03, - i40e_aqc_opc_get_rss_key = 0x0B04, - i40e_aqc_opc_get_rss_lut = 0x0B05, - - /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, - - /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, - i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, - i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, - - /* debug commands */ - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, -}; - -/* command structures and indirect data structures */ - -/* Structure naming conventions: - * - no suffix for direct command descriptor structures - * - _data for indirect sent data - * - _resp for indirect return data (data which is both will use _data) - * - _completion for direct return data - * - _element_ for repeated elements (may also be _data or _resp) - * - * Command structures are expected to overlay the params.raw member of the basic - * descriptor, and as such cannot exceed 16 bytes in length. - */ - -/* This macro is used to generate a compilation error if a structure - * is not exactly the correct length. It gives a divide by zero error if the - * structure is not of the correct size, otherwise it creates an enum that is - * never used. - */ -#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ - { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 
1 : 0) } - -/* This macro is used extensively to ensure that command structures are 16 - * bytes in length as they have to map to the raw array of that size. - */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) - -/* internal (0x00XX) commands */ - -/* Get version (direct 0x0001) */ -struct i40e_aqc_get_version { - __le32 rom_ver; - __le32 fw_build; - __le16 fw_major; - __le16 fw_minor; - __le16 api_major; - __le16 api_minor; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); - -/* Send driver version (indirect 0x0002) */ -struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); - -/* Queue Shutdown (direct 0x0003) */ -struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); - -/* Set PF context (0x0004, direct) */ -struct i40e_aqc_set_pf_context { - u8 pf_id; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); - -/* Request resource ownership (direct 0x0008) - * Release resource ownership (direct 0x0009) - */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 - -struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); - -/* Get function capabilities (indirect 0x000A) - * Get device capabilities (indirect 0x000B) - */ -struct i40e_aqc_list_capabilites { - u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 - u8 pf_index; - u8 reserved[2]; - __le32 count; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); - -struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; -}; - -/* list of caps */ - -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_ISCSI 0x0022 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 -#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 - -/* Set CPPM Configuration (direct 0x0103) */ -struct i40e_aqc_cppm_configuration { - __le16 command_flags; 
-#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); - -/* Set ARP Proxy command / response (indirect 0x0104) */ -struct i40e_aqc_arp_proxy_data { - __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0800 -#define I40E_AQ_ARP_UNSUP_CTL 0x1000 -#define I40E_AQ_ARP_ENA 0x2000 -#define I40E_AQ_ARP_ADD_IPV4 0x4000 -#define I40E_AQ_ARP_DEL_IPV4 0x8000 - __le16 table_id; - __le32 enabled_offloads; -#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 -#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 - __le32 ip_addr; - u8 mac_addr[6]; - u8 reserved[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); - -/* Set NS Proxy Table Entry Command (indirect 0x0105) */ -struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0001 -#define I40E_AQ_NS_PROXY_DEL_0 0x0002 -#define I40E_AQ_NS_PROXY_ADD_1 0x0004 -#define I40E_AQ_NS_PROXY_DEL_1 0x0008 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 -#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 -#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! 
spec specifies BE byte order */ - u8 ipv6_addr_1[16]; -}; - -I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); - -/* Manage LAA Command (0x0106) - obsolete */ -struct i40e_aqc_mng_laa { - __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); - -/* Manage MAC Address Read Command (indirect 0x0107) */ -struct i40e_aqc_mac_address_read { - __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_MC_MAG_EN_VALID 0x100 -#define I40E_AQC_ADDR_VALID_MASK 0x3F0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); - -struct i40e_aqc_mac_address_read_data { - u8 pf_lan_mac[6]; - u8 pf_san_mac[6]; - u8 port_mac[6]; - u8 pf_wol_mac[6]; -}; - -I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); - -/* Manage MAC Address Write Command (0x0108) */ -struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 -#define I40E_AQC_WRITE_TYPE_MASK 0xC000 - - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); - -/* PXE commands (0x011x) */ - -/* Clear PXE Command and response (direct 0x0110) */ -struct i40e_aqc_clear_pxe { - u8 rx_cnt; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); - -/* Set WoL Filter (0x0120) */ - -struct i40e_aqc_set_wol_filter { - __le16 filter_index; -#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ - I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) - -#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 -#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ - I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) - __le16 cmd_flags; -#define I40E_AQC_SET_WOL_FILTER 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 -#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000 -#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 -#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 - __le16 valid_flags; -#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 - u8 reserved[2]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); - -struct i40e_aqc_set_wol_filter_data { - u8 filter[128]; - u8 mask[16]; -}; - -I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); - -/* Get Wake Reason (0x0121) */ - -struct i40e_aqc_get_wake_reason_completion { - u8 reserved_1[2]; - __le16 wake_reason; -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) - u8 reserved_2[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); - -/* Switch configuration commands (0x02xx) */ - -/* Used by many indirect commands that only pass an seid and a buffer in the - * command - */ -struct i40e_aqc_switch_seid { - __le16 
seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); - -/* Get Switch Configuration command (indirect 0x0200) - * uses i40e_aqc_switch_seid for the descriptor - */ -struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); - -struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; -}; - -I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); - -/* Get Switch Configuration (indirect 0x0200) - * an array of elements are returned in the response buffer - * the first in the array is the header, remainder are elements - */ -struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); - -/* Add Statistics (direct 0x0201) - * Remove Statistics (direct 0x0202) - */ -struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); - -/* Set Port Parameters command (direct 0x0203) */ -struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); - -/* Get Switch Resource Allocation (indirect 0x0204) */ -struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); - -/* expect an array of these structs in the response buffer */ -struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; -}; - -I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); - -/* Set Switch Configuration (direct 0x0205) */ -struct i40e_aqc_set_switch_config { - __le16 flags; -/* flags used for both fields below */ -#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 -#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 - __le16 valid_flags; - /* The ethertype in switch_tag is dropped on ingress and used - * internally by the switch. Set this to zero for the default - * of 0x88a8 (802.1ad). Should be zero for firmware API - * versions lower than 1.7. - */ - __le16 switch_tag; - /* The ethertypes in first_tag and second_tag are used to - * match the outer and inner VLAN tags (respectively) when HW - * double VLAN tagging is enabled via the set port parameters - * AQ command. Otherwise these are both ignored. Set them to - * zero for their defaults of 0x8100 (802.1Q). Should be zero - * for firmware API versions lower than 1.7. 
- */ - __le16 first_tag; - __le16 second_tag; - u8 reserved[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); - -/* Read Receive control registers (direct 0x0206) - * Write Receive control registers (direct 0x0207) - * used for accessing Rx control registers that can be - * slow and need special handling when under high Rx load - */ -struct i40e_aqc_rx_ctl_reg_read_write { - __le32 reserved1; - __le32 address; - __le32 reserved2; - __le32 value; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); - -/* Add VSI (indirect 0x0210) - * this indirect command uses struct i40e_aqc_vsi_properties_data - * as the indirect buffer (128 bytes) - * - * Update VSI (indirect 0x211) - * uses the same data structure as Add VSI - * - * Get VSI (indirect 0x0212) - * uses the same completion and data structure as Add VSI - */ -struct i40e_aqc_add_get_update_vsi { - __le16 uplink_seid; - u8 connection_type; -#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 - u8 reserved1; - u8 vf_id; - u8 reserved2; - __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) -#define I40E_AQ_VSI_TYPE_VF 0x0 -#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 -#define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); - -struct i40e_aqc_add_get_update_vsi_completion { - __le16 seid; - __le16 vsi_number; - __le16 vsi_used; - __le16 vsi_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); - -struct i40e_aqc_vsi_properties_data { - /* first 96 byte are written by SW */ - __le16 valid_sections; -#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 -#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 -#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 -#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 -#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 -#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 - /* switch section */ - __le16 switch_id; /* 12bit id combined with flags below */ -#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 -#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 -#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 -#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 - u8 sw_reserved[2]; - /* security section */ - u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 - u8 sec_reserved; - /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - __le16 fcoe_pvid; - u8 port_vlan_flags; -#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 -#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ - I40E_AQ_VSI_PVLAN_MODE_SHIFT) -#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 -#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 -#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 -#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 -#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ - I40E_AQ_VSI_PVLAN_EMOD_SHIFT) -#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 -#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 -#define 
I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 - u8 pvlan_reserved[3]; - /* ingress egress up sections */ - __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) - __le32 egress_table; /* same defines as for ingress table */ - /* cascaded PV section */ - __le16 cas_pv_tag; - u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 - u8 cas_pv_reserved; - /* queue mapping section */ - __le16 mapping_flags; -#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 -#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 - __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) - __le16 tc_mapping[8]; -#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) -#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) - /* queueing option section */ - u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 -#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 -#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 -#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 -#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 - u8 queueing_opt_reserved[3]; - /* scheduler section */ - u8 up_enable_bits; - u8 sched_reserved; - /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress tbl */ - u8 cmd_reserved[8]; - /* last 32 bytes are written by FW */ - __le16 qs_handle[8]; -#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF - __le16 stat_counter_idx; - __le16 sched_id; - u8 resp_reserved[12]; -}; - -I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); - -/* Add Port Virtualizer (direct 0x0220) - * also used for update PV (direct 0x0221) but only flags are used - * (IS_CTRL_PORT only works on add PV) - */ -struct i40e_aqc_add_update_pv { - __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 - __le16 uplink_seid; - __le16 connected_seid; - u8 reserved[10]; -}; - 
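/* Editor's note, not part of the patch: a minimal sketch of how the
 * SHIFT/MASK pairs removed above (here the tc_mapping fields of
 * i40e_aqc_vsi_properties_data) are meant to be combined.  The helper
 * name is hypothetical; it only illustrates the packing convention and
 * assumes the kernel's u16/__le16 types and cpu_to_le16() helper.
 */
static inline __le16 i40e_pack_tc_mapping_example(u16 queue_offset,
						  u16 queue_number)
{
	u16 qmap;

	/* low 9 bits: first queue of the TC; next 3 bits: queue count field */
	qmap = (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
		I40E_AQ_VSI_TC_QUE_OFFSET_MASK;
	qmap |= (queue_number << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
		 I40E_AQ_VSI_TC_QUE_NUMBER_MASK;

	/* tc_mapping[] is declared __le16, so convert before writing it */
	return cpu_to_le16(qmap);
}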
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); - -struct i40e_aqc_add_update_pv_completion { - /* reserved for update; for add also encodes error if rc == ENOSPC */ - __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); - -/* Get PV Params (direct 0x0222) - * uses i40e_aqc_switch_seid for the descriptor - */ - -struct i40e_aqc_get_pv_params_completion { - __le16 seid; - __le16 default_stag; - __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 - u8 reserved[8]; - __le16 default_port_seid; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); - -/* Add VEB (direct 0x0230) */ -struct i40e_aqc_add_veb { - __le16 uplink_seid; - __le16 downlink_seid; - __le16 veb_flags; -#define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) -#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 -#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ -#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 - u8 enable_tcs; - u8 reserved[9]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); - -struct i40e_aqc_add_veb_completion { - u8 reserved[6]; - __le16 switch_seid; - /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ - __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); - -/* Get VEB Parameters (direct 0x0232) - * uses i40e_aqc_switch_seid for the descriptor - */ -struct i40e_aqc_get_veb_parameters_completion { - __le16 seid; - __le16 switch_id; - __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; - u8 reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); - -/* Delete Element (direct 0x0243) - * uses the generic i40e_aqc_switch_seid - */ - -/* Add MAC-VLAN (indirect 0x0250) */ - -/* used for the command for most vlan commands */ -struct i40e_aqc_macvlan { - __le16 num_addresses; - __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) -#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); - -/* indirect data for command and response */ -struct i40e_aqc_add_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - __le16 flags; -#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 -#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 -#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 - __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) - /* response section */ - u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define 
I40E_AQC_MM_HASH_MATCH 0x02 -#define I40E_AQC_MM_ERR_NO_RES 0xFF - u8 reserved1[3]; -}; - -struct i40e_aqc_add_remove_macvlan_completion { - __le16 perfect_mac_used; - __le16 perfect_mac_free; - __le16 unicast_hash_free; - __le16 multicast_hash_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); - -/* Remove MAC-VLAN (indirect 0x0251) - * uses i40e_aqc_macvlan for the descriptor - * data points to an array of num_addresses of elements - */ - -struct i40e_aqc_remove_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - u8 flags; -#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 -#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 - u8 reserved[3]; - /* reply section */ - u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF - u8 reply_reserved[3]; -}; - -/* Add VLAN (indirect 0x0252) - * Remove VLAN (indirect 0x0253) - * use the generic i40e_aqc_macvlan for the command - */ -struct i40e_aqc_add_remove_vlan_element_data { - __le16 vlan_tag; - u8 vlan_flags; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 - u8 reserved; - u8 result; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF - u8 reserved1[3]; -}; - -struct i40e_aqc_add_remove_vlan_completion { - u8 reserved[4]; - __le16 vlans_used; - __le16 vlans_free; - __le32 addr_high; - __le32 addr_low; -}; - -/* Set VSI Promiscuous Modes (direct 0x0254) */ -struct i40e_aqc_set_vsi_promiscuous_modes { - __le16 promiscuous_flags; - __le16 valid_flags; -/* flags used for both fields above */ -#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 -#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 -#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 -#define I40E_AQC_SET_VSI_DEFAULT 0x08 -#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 - __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF -#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); - -/* Add S/E-tag command (direct 0x0255) - * Uses generic i40e_aqc_add_remove_tag_completion for completion - */ -struct i40e_aqc_add_tag { - __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 - __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - __le16 queue_number; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); - -struct i40e_aqc_add_remove_tag_completion { - u8 
reserved[12]; - __le16 tags_used; - __le16 tags_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); - -/* Remove S/E-tag command (direct 0x0256) - * Uses generic i40e_aqc_add_remove_tag_completion for completion - */ -struct i40e_aqc_remove_tag { - __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); - -/* Add multicast E-Tag (direct 0x0257) - * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields - * and no external data - */ -struct i40e_aqc_add_remove_mcast_etag { - __le16 pv_seid; - __le16 etag; - u8 num_unicast_etags; - u8 reserved[3]; - __le32 addr_high; /* address of array of 2-byte s-tags */ - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); - -struct i40e_aqc_add_remove_mcast_etag_completion { - u8 reserved[4]; - __le16 mcast_etags_used; - __le16 mcast_etags_free; - __le32 addr_high; - __le32 addr_low; - -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); - -/* Update S/E-Tag (direct 0x0259) */ -struct i40e_aqc_update_tag { - __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) - __le16 old_tag; - __le16 new_tag; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); - -struct i40e_aqc_update_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); - -/* Add Control Packet filter (direct 0x025A) - * Remove Control Packet filter (direct 0x025B) - * uses the i40e_aqc_add_oveb_cloud, - * and the generic direct completion structure - */ -struct i40e_aqc_add_remove_control_packet_filter { - u8 mac[6]; - __le16 etype; - __le16 flags; -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 - __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) - __le16 queue; - u8 reserved[2]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); - -struct i40e_aqc_add_remove_control_packet_filter_completion { - __le16 mac_etype_used; - __le16 etype_used; - __le16 mac_etype_free; - __le16 etype_free; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); - -/* Add Cloud filters (indirect 0x025C) - * Remove Cloud filters (indirect 0x025D) - * uses the i40e_aqc_add_remove_cloud_filters, - * and the generic indirect completion structure - */ -struct i40e_aqc_add_remove_cloud_filters { - u8 num_filters; - u8 reserved; - __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 big_buffer_flag; -#define I40E_AQC_ADD_CLOUD_CMD_BB 1 - u8 reserved2[3]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); - -struct i40e_aqc_cloud_filters_element_data { - u8 outer_mac[6]; - u8 inner_mac[6]; - __le16 inner_vlan; - union { - struct { - u8 reserved[12]; - u8 
data[4]; - } v4; - struct { - u8 data[16]; - } v6; - struct { - __le16 data[8]; - } raw_v6; - } ipaddr; - __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_FILTER_SHIFT) -/* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 -/* 0x0002 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 -/* 0x0005 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 -/* 0x0007 reserved */ -/* 0x0008 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B -#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C -/* 0x0010 to 0x0017 is for custom filters */ -#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ -#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ -#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ - -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 - -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 - - __le32 tenant_id; - u8 reserved[4]; - __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved2[14]; - /* response section */ - u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF - u8 response_reserved[7]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); - -/* i40e_aqc_cloud_filters_element_bb is used when - * I40E_AQC_ADD_CLOUD_CMD_BB flag is set. 
- */ -struct i40e_aqc_cloud_filters_element_bb { - struct i40e_aqc_cloud_filters_element_data element; - u16 general_fields[32]; -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 -}; - -I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); - -struct i40e_aqc_remove_cloud_filters_completion { - __le16 perfect_ovlan_used; - __le16 perfect_ovlan_free; - __le16 vlan_used; - __le16 vlan_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); - -/* Replace filter Command 0x025F - * uses the i40e_aqc_replace_cloud_filters, - * and the generic indirect completion structure - */ -struct i40e_filter_data { - u8 filter_type; - u8 input[3]; -}; - -I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); - -struct i40e_aqc_replace_cloud_filters_cmd { - u8 valid_flags; -#define I40E_AQC_REPLACE_L1_FILTER 0x0 -#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 -#define I40E_AQC_GET_CLOUD_FILTERS 0x2 -#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 -#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 - u8 old_filter_type; - u8 new_filter_type; - u8 tr_bit; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); - -struct i40e_aqc_replace_cloud_filters_cmd_buf { - u8 data[32]; -/* Filter type INPUT codes*/ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) - -/* Field Vector offsets */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 - -#define 
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 - struct i40e_filter_data filters[8]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf); - -/* Add Mirror Rule (indirect or direct 0x0260) - * Delete Mirror Rule (indirect or direct 0x0261) - * note: some rule types (4,5) do not use an external buffer. - * take care to set the flags correctly. - */ -struct i40e_aqc_add_delete_mirror_rule { - __le16 seid; - __le16 rule_type; -#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 -#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ - I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 -#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 - __le16 num_entries; - __le16 destination; /* VSI for add, rule id for delete */ - __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); - -struct i40e_aqc_add_delete_mirror_rule_completion { - u8 reserved[2]; - __le16 rule_id; /* only used on add */ - __le16 mirror_rules_used; - __le16 mirror_rules_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); - -/* Dynamic Device Personalization */ -struct i40e_aqc_write_personalization_profile { - u8 flags; - u8 reserved[3]; - __le32 profile_track_id; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); - -struct i40e_aqc_write_ddp_resp { - __le32 error_offset; - __le32 error_info; - __le32 addr_high; - __le32 addr_low; -}; - -struct i40e_aqc_get_applied_profiles { - u8 flags; -#define I40E_AQC_GET_DDP_GET_CONF 0x1 -#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 - u8 rsv[3]; - __le32 reserved; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); - -/* DCB 0x03xx*/ - -/* PFC Ignore (direct 0x0301) - * the command and response use the same descriptor structure - */ -struct i40e_aqc_pfc_ignore { - u8 tc_bitmap; - u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); - -/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure - * with no parameters - */ - -/* TX scheduler 0x04xx */ - -/* Almost all the indirect commands use - * this generic struct to pass the SEID in param0 - */ -struct i40e_aqc_tx_sched_ind { - __le16 vsi_seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); - -/* Several commands respond with a set of queue set handles */ -struct i40e_aqc_qs_handles_resp { - __le16 qs_handles[8]; -}; - -/* Configure VSI BW limits (direct 0x0400) */ -struct i40e_aqc_configure_vsi_bw_limit { - __le16 vsi_seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_credit; /* 0-3, limit = 2^max */ - u8 reserved2[7]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); - -/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) - * responds with i40e_aqc_qs_handles_resp - */ -struct i40e_aqc_configure_vsi_ets_sla_bw_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, 
i40e_aqc_configure_vsi_ets_sla_bw_data); - -/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) - * responds with i40e_aqc_qs_handles_resp - */ -struct i40e_aqc_configure_vsi_tc_bw_data { - u8 tc_valid_bits; - u8 reserved[3]; - u8 tc_bw_credits[8]; - u8 reserved1[4]; - __le16 qs_handles[8]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); - -/* Query vsi bw configuration (indirect 0x0408) */ -struct i40e_aqc_query_vsi_bw_config_resp { - u8 tc_valid_bits; - u8 tc_suspended_bits; - u8 reserved[14]; - __le16 qs_handles[8]; - u8 reserved1[4]; - __le16 port_bw_limit; - u8 reserved2[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved3[23]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); - -/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ -struct i40e_aqc_query_vsi_ets_sla_config_resp { - u8 tc_valid_bits; - u8 reserved[3]; - u8 share_credits[8]; - __le16 credits[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); - -/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ -struct i40e_aqc_configure_switching_comp_bw_limit { - __le16 seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved2[7]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); - -/* Enable Physical Port ETS (indirect 0x0413) - * Modify Physical Port ETS (indirect 0x0414) - * Disable Physical Port ETS (indirect 0x0415) - */ -struct i40e_aqc_configure_switching_comp_ets_data { - u8 reserved[4]; - u8 tc_valid_bits; - u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 - u8 tc_strict_priority_flags; - u8 reserved1[17]; - u8 tc_bw_share_credits[8]; - u8 reserved2[96]; -}; - -I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); - -/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ -struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credit[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, - i40e_aqc_configure_switching_comp_ets_bw_limit_data); - -/* Configure Switching Component Bandwidth Allocation per Tc - * (indirect 0x0417) - */ -struct i40e_aqc_configure_switching_comp_bw_config_data { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits; /* bool */ - u8 tc_bw_share_credits[8]; - u8 reserved1[20]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); - -/* Query Switching Component Configuration (indirect 0x0418) */ -struct i40e_aqc_query_switching_comp_ets_config_resp { - u8 tc_valid_bits; - u8 reserved[35]; - __le16 port_bw_limit; - u8 reserved1[2]; - u8 tc_bw_max; /* 0-3, limit = 2^max */ - u8 reserved2[23]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); - -/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ -struct i40e_aqc_query_port_ets_config_resp { - u8 reserved[4]; - u8 tc_valid_bits; - u8 reserved1; - u8 tc_strict_priority_bits; - u8 reserved2; - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; - - /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved3[32]; -}; - -I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); - -/* Query Switching Component Bandwidth Allocation per Traffic Type - 
* (indirect 0x041A) - */ -struct i40e_aqc_query_switching_comp_bw_config_resp { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits_enable; /* bool */ - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); - -/* Suspend/resume port TX traffic - * (direct 0x041B and 0x041C) uses the generic SEID struct - */ - -/* Configure partition BW - * (indirect 0x041D) - */ -struct i40e_aqc_configure_partition_bw_data { - __le16 pf_valid_bits; - u8 min_bw[16]; /* guaranteed bandwidth */ - u8 max_bw[16]; /* bandwidth limit */ -}; - -I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); - -/* Get and set the active HMC resource profile and status. - * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ - -/* set in param0 for get phy abilities to report qualified modules */ -#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 -#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 - -enum i40e_aq_phy_type { - I40E_PHY_TYPE_SGMII = 0x0, - I40E_PHY_TYPE_1000BASE_KX = 0x1, - I40E_PHY_TYPE_10GBASE_KX4 = 0x2, - I40E_PHY_TYPE_10GBASE_KR = 0x3, - I40E_PHY_TYPE_40GBASE_KR4 = 0x4, - I40E_PHY_TYPE_XAUI = 0x5, - I40E_PHY_TYPE_XFI = 0x6, - I40E_PHY_TYPE_SFI = 0x7, - I40E_PHY_TYPE_XLAUI = 0x8, - I40E_PHY_TYPE_XLPPI = 0x9, - I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, - I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, - I40E_PHY_TYPE_10GBASE_AOC = 0xC, - I40E_PHY_TYPE_40GBASE_AOC = 0xD, - I40E_PHY_TYPE_UNRECOGNIZED = 0xE, - I40E_PHY_TYPE_UNSUPPORTED = 0xF, - I40E_PHY_TYPE_100BASE_TX = 0x11, - I40E_PHY_TYPE_1000BASE_T = 0x12, - I40E_PHY_TYPE_10GBASE_T = 0x13, - I40E_PHY_TYPE_10GBASE_SR = 0x14, - I40E_PHY_TYPE_10GBASE_LR = 0x15, - I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, - I40E_PHY_TYPE_10GBASE_CR1 = 0x17, - I40E_PHY_TYPE_40GBASE_CR4 = 0x18, - I40E_PHY_TYPE_40GBASE_SR4 = 0x19, - I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, - I40E_PHY_TYPE_1000BASE_SX = 0x1B, - I40E_PHY_TYPE_1000BASE_LX = 0x1C, - I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, - I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, - I40E_PHY_TYPE_25GBASE_KR = 0x1F, - I40E_PHY_TYPE_25GBASE_CR = 0x20, - I40E_PHY_TYPE_25GBASE_SR = 0x21, - I40E_PHY_TYPE_25GBASE_LR = 0x22, - I40E_PHY_TYPE_25GBASE_AOC = 0x23, - I40E_PHY_TYPE_25GBASE_ACC = 0x24, - I40E_PHY_TYPE_MAX, - I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, - I40E_PHY_TYPE_EMPTY = 0xFE, - I40E_PHY_TYPE_DEFAULT = 0xFF, -}; - -#define I40E_LINK_SPEED_100MB_SHIFT 0x1 -#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 -#define I40E_LINK_SPEED_10GB_SHIFT 0x3 -#define I40E_LINK_SPEED_40GB_SHIFT 0x4 -#define I40E_LINK_SPEED_20GB_SHIFT 0x5 -#define I40E_LINK_SPEED_25GB_SHIFT 0x6 - -enum i40e_aq_link_speed { - I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), - I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), -}; - 
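/* Editor's note, not part of the patch: the link_speed byte reported by
 * the admin queue (see i40e_aqc_get_link_status below and the
 * i40e_aq_link_speed enum above) is a one-bit-per-speed bitmap built from
 * the *_SHIFT values.  A minimal decode sketch follows; the helper name
 * and the "return Mbps" convention are illustrative assumptions, and the
 * u8 type is the kernel's.
 */
static inline unsigned int i40e_aq_speed_to_mbps_example(u8 link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:	return 100;
	case I40E_LINK_SPEED_1GB:	return 1000;
	case I40E_LINK_SPEED_10GB:	return 10000;
	case I40E_LINK_SPEED_20GB:	return 20000;
	case I40E_LINK_SPEED_25GB:	return 25000;
	case I40E_LINK_SPEED_40GB:	return 40000;
	default:
		return 0;	/* I40E_LINK_SPEED_UNKNOWN or multiple bits set */
	}
}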
-struct i40e_aqc_module_desc { - u8 oui[3]; - u8 reserved1; - u8 part_number[16]; - u8 revision[4]; - u8 reserved2[8]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); - -struct i40e_aq_get_phy_abilities_resp { - __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum bit patterns */ - u8 abilities; -#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 -#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 -#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 -#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 - __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 - __le32 eeer_val; - u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 phy_type_ext; -#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 -#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 -#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 -#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 -#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 -#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 - u8 fec_cfg_curr_mod_ext_info; -#define I40E_AQ_ENABLE_FEC_KR 0x01 -#define I40E_AQ_ENABLE_FEC_RS 0x02 -#define I40E_AQ_REQUEST_FEC_KR 0x04 -#define I40E_AQ_REQUEST_FEC_RS 0x08 -#define I40E_AQ_ENABLE_FEC_AUTO 0x10 -#define I40E_AQ_FEC -#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 -#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 - - u8 ext_comp_code; - u8 phy_id[4]; - u8 module_type[3]; - u8 qualified_module_count; -#define I40E_AQ_PHY_MAX_QMS 16 - struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; -}; - -I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); - -/* Set PHY Config (direct 0x0601) */ -struct i40e_aq_set_phy_config { /* same bits as above in all */ - __le32 phy_type; - u8 link_speed; - u8 abilities; -/* bits 0-2 use the values from get_phy_abilities_resp */ -#define I40E_AQ_PHY_ENABLE_LINK 0x08 -#define I40E_AQ_PHY_ENABLE_AN 0x10 -#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 - __le16 eee_capability; - __le32 eeer; - u8 low_power_ctrl; - u8 phy_type_ext; -#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 -#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 -#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 -#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 - u8 fec_config; -#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0) -#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1) -#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2) -#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3) -#define I40E_AQ_SET_FEC_AUTO BIT(4) -#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0 -#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT) - u8 reserved; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); - -/* Set MAC Config command data structure (direct 0x0603) */ -struct i40e_aq_set_mac_config { - __le16 max_frame_size; - u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 - u8 tx_timer_priority; /* bitmap */ - __le16 tx_timer_value; - __le16 fc_refresh_threshold; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); - -/* Restart Auto-Negotiation (direct 0x605) */ -struct i40e_aqc_set_link_restart_an { - u8 command; -#define I40E_AQ_PHY_RESTART_AN 0x02 -#define I40E_AQ_PHY_LINK_ENABLE 0x04 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); - -/* Get Link Status cmd & response data structure (direct 0x0607) */ -struct i40e_aqc_get_link_status { - __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 -#define I40E_AQ_LSE_DISABLE 0x2 -#define I40E_AQ_LSE_ENABLE 0x3 -/* only response uses this flag */ -#define I40E_AQ_LSE_IS_ENABLED 0x1 - u8 phy_type; /* i40e_aq_phy_type */ - u8 link_speed; /* i40e_aq_link_speed */ - u8 link_info; -#define I40E_AQ_LINK_UP 0x01 /* obsolete */ -#define I40E_AQ_LINK_UP_FUNCTION 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_LINK_UP_PORT 0x20 -#define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 - u8 an_info; -#define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 -#define I40E_AQ_LINK_PAUSE_TX 0x20 -#define I40E_AQ_LINK_PAUSE_RX 0x40 -#define I40E_AQ_QUALIFIED_MODULE 0x80 - u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 -/* 25G Error Codes */ -#define I40E_AQ_25G_NO_ERR 0X00 -#define I40E_AQ_25G_NOT_PRESENT 0X01 -#define I40E_AQ_25G_NVM_CRC_ERR 0X02 -#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 -#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 -#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 - u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ -/* Since firmware API 1.7 loopback field keeps power class info as well */ -#define I40E_AQ_LOOPBACK_MASK 0x07 -#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 -#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) - __le16 max_frame_size; - u8 config; -#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 -#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 -#define I40E_AQ_CONFIG_CRC_ENA 0x04 -#define I40E_AQ_CONFIG_PACING_MASK 0x78 - union { - struct { - u8 power_desc; -#define I40E_AQ_LINK_POWER_CLASS_1 0x00 -#define I40E_AQ_LINK_POWER_CLASS_2 0x01 -#define I40E_AQ_LINK_POWER_CLASS_3 0x02 -#define I40E_AQ_LINK_POWER_CLASS_4 0x03 -#define I40E_AQ_PWR_CLASS_MASK 0x03 - u8 reserved[4]; - }; - struct { - u8 link_type[4]; - u8 link_type_ext; - }; - }; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); - -/* Set event mask command (direct 0x613) */ -struct i40e_aqc_set_phy_int_mask { - u8 reserved[8]; - __le16 event_mask; -#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 -#define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 -#define 
I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 - u8 reserved1[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); - -/* Get Local AN advt register (direct 0x0614) - * Set Local AN advt register (direct 0x0615) - * Get Link Partner AN advt register (direct 0x0616) - */ -struct i40e_aqc_an_advt_reg { - __le32 local_an_reg0; - __le16 local_an_reg1; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); - -/* Set Loopback mode (0x0618) */ -struct i40e_aqc_set_lb_mode { - __le16 lb_mode; -#define I40E_AQ_LB_PHY_LOCAL 0x01 -#define I40E_AQ_LB_PHY_REMOTE 0x02 -#define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); - -/* Set PHY Debug command (0x0622) */ -struct i40e_aqc_set_phy_debug { - u8 command_flags; -#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ - I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 -#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); - -enum i40e_aq_phy_reg_type { - I40E_AQC_PHY_REG_INTERNAL = 0x1, - I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, - I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 -}; - -/* Run PHY Activity (0x0626) */ -struct i40e_aqc_run_phy_activity { - __le16 activity_id; - u8 flags; - u8 reserved1; - __le32 control; - __le32 data; - u8 reserved2[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); - -/* Set PHY Register command (0x0628) */ -/* Get PHY Register command (0x0629) */ -struct i40e_aqc_phy_register_access { - u8 phy_interface; -#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 -#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 -#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 - u8 dev_address; - u8 reserved1[2]; - __le32 reg_address; - __le32 reg_value; - u8 reserved2[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); - -/* NVM Read command (indirect 0x0701) - * NVM Erase commands (direct 0x0702) - * NVM Update commands (indirect 0x0703) - */ -struct i40e_aqc_nvm_update { - u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 -#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 - u8 module_pointer; - __le16 length; - __le32 offset; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); - -/* NVM Config Read (indirect 0x0704) */ -struct i40e_aqc_nvm_config_read { - __le16 cmd_flags; -#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 -#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 -#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 - __le16 element_count; - __le16 element_id; /* Feature/field ID */ - __le16 element_id_msw; /* MSWord of field ID */ - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); - -/* NVM Config Write (indirect 0x0705) */ -struct i40e_aqc_nvm_config_write { - __le16 cmd_flags; - __le16 element_count; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); - -/* Used for 0x0704 as well as for 0x0705 
commands */ -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) -#define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) -struct i40e_aqc_nvm_config_data_feature { - __le16 feature_id; -#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 -#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 -#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 - __le16 feature_options; - __le16 feature_selection; -}; - -I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); - -struct i40e_aqc_nvm_config_data_immediate_field { - __le32 field_id; - __le32 field_value; - __le16 field_options; - __le16 reserved; -}; - -I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); - -/* OEM Post Update (indirect 0x0720) - * no command data struct used - */ - struct i40e_aqc_nvm_oem_post_update { -#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 - u8 sel_data; - u8 reserved[7]; -}; - -I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); - -struct i40e_aqc_nvm_oem_post_update_buffer { - u8 str_len; - u8 dev_addr; - __le16 eeprom_addr; - u8 data[36]; -}; - -I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); - -/* Thermal Sensor (indirect 0x0721) - * read or set thermal sensor configs and values - * takes a sensor and command specific data buffer, not detailed here - */ -struct i40e_aqc_thermal_sensor { - u8 sensor_action; -#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 -#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 -#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); - -/* Send to PF command (indirect 0x0801) id is only used by PF - * Send to VF command (indirect 0x0802) id is only used by PF - * Send to Peer PF command (indirect 0x0803) - */ -struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); - -/* Alternate structure */ - -/* Direct write (direct 0x0900) - * Direct read (direct 0x0902) - */ -struct i40e_aqc_alternate_write { - __le32 address0; - __le32 data0; - __le32 address1; - __le32 data1; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); - -/* Indirect write (indirect 0x0901) - * Indirect read (indirect 0x0903) - */ - -struct i40e_aqc_alternate_ind_write { - __le32 address; - __le32 length; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); - -/* Done alternate write (direct 0x0904) - * uses i40e_aq_desc - */ -struct i40e_aqc_alternate_write_done { - __le16 cmd_flags; -#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 -#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 -#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 -#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); - -/* Set OEM mode (direct 0x0905) */ -struct i40e_aqc_alternate_set_mode { - __le32 mode; -#define I40E_AQ_ALTERNATE_MODE_NONE 0 -#define I40E_AQ_ALTERNATE_MODE_OEM 1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); - -/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ - -/* async events 0x10xx */ - -/* Lan Queue Overflow Event (direct, 0x1001) */ -struct i40e_aqc_lan_overflow { - __le32 prtdcb_rupto; - __le32 otx_ctl; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); - -/* Get LLDP MIB (indirect 0x0A00) 
*/ -struct i40e_aqc_lldp_get_mib { - u8 type; - u8 reserved1; -#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 -#define I40E_AQ_LLDP_MIB_LOCAL 0x0 -#define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC -#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) -/* TX pause flags use I40E_AQ_LINK_TX_* above */ - __le16 local_len; - __le16 remote_len; - u8 reserved2[2]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); - -/* Configure LLDP MIB Change Event (direct 0x0A01) - * also used for the event (with type in the command field) - */ -struct i40e_aqc_lldp_update_mib { - u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 -#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); - -/* Add LLDP TLV (indirect 0x0A02) - * Delete LLDP TLV (indirect 0x0A04) - */ -struct i40e_aqc_lldp_add_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved1[1]; - __le16 len; - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); - -/* Update LLDP TLV (indirect 0x0A03) */ -struct i40e_aqc_lldp_update_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved; - __le16 old_len; - __le16 new_offset; - __le16 new_len; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); - -/* Stop LLDP (direct 0x0A05) */ -struct i40e_aqc_lldp_stop { - u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); - -/* Start LLDP (direct 0x0A06) */ - -struct i40e_aqc_lldp_start { - u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); - -/* Set DCB (direct 0x0303) */ -struct i40e_aqc_set_dcb_parameters { - u8 command; -#define I40E_AQ_DCB_SET_AGENT 0x1 -#define I40E_DCB_VALID 0x1 - u8 valid_flags; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters); - -/* Apply MIB changes (0x0A07) - * uses the generic struc as it contains no data - */ - -/* Add Udp Tunnel command and completion (direct 0x0B00) */ -struct i40e_aqc_add_udp_tunnel { - __le16 udp_port; - u8 reserved0[3]; - u8 protocol_type; -#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 -#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 -#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 -#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 - u8 reserved1[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); - -struct i40e_aqc_add_udp_tunnel_completion { - __le16 udp_port; - u8 filter_entry_index; - u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 - u8 total_filters; - u8 reserved[11]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); - -/* remove UDP Tunnel command (0x0B01) */ -struct i40e_aqc_remove_udp_tunnel { - u8 reserved[2]; - u8 index; /* 0 to 15 */ - u8 reserved2[13]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); - -struct i40e_aqc_del_udp_tunnel_completion { - __le16 udp_port; - u8 index; /* 0 to 15 */ - u8 multiple_pfs; - u8 total_filters_used; - u8 reserved1[11]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); 
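For reference, the Add UDP Tunnel descriptor removed above (direct 0x0B00) is a plain direct command: the caller supplies a little-endian port and one of the I40E_AQC_TUNNEL_TYPE_* values, and the completion echoes back the filter slot that was used. The following is only an illustrative sketch of that flow, assuming the i40evf helpers that appear later in this patch (i40evf_fill_default_direct_cmd_desc(), i40evf_asq_send_command()) and the opcode enum value i40e_aqc_opc_add_udp_tunnel from the same header; it is not part of the removed code.

/* Sketch only: add a VXLAN UDP port filter via the direct 0x0B00 command,
 * using the i40e_aqc_add_udp_tunnel / _completion layouts shown above.
 */
static i40e_status example_add_vxlan_port(struct i40e_hw *hw, u16 udp_port,
					  u8 *filter_index)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_udp_tunnel *cmd =
		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
	struct i40e_aqc_add_udp_tunnel_completion *resp =
		(struct i40e_aqc_add_udp_tunnel_completion *)&desc.params.raw;
	i40e_status status;

	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);

	cmd->udp_port = cpu_to_le16(udp_port);		/* port is little endian */
	cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN;

	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);

	/* On success the same descriptor holds the completion data */
	if (!status && filter_index)
		*filter_index = resp->filter_entry_index;

	return status;
}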
- -struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) -#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 -#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ - I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) - __le16 vsi_id; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); - -struct i40e_aqc_get_set_rss_key_data { - u8 standard_rss_key[0x28]; - u8 extended_hash_key[0xc]; -}; - -I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); - -struct i40e_aqc_get_set_rss_lut { -#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) -#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ - I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) - __le16 vsi_id; -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ - BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) - -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 - __le16 flags; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); - -/* tunnel key structure 0x0B10 */ - -struct i40e_aqc_tunnel_key_structure_A0 { - __le16 key1_off; - __le16 key1_len; - __le16 key2_off; - __le16 key2_len; - __le16 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 - u8 resreved[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0); - -struct i40e_aqc_tunnel_key_structure { - u8 key1_off; - u8 key2_off; - u8 key1_len; /* 0 to 15 */ - u8 key2_len; /* 0 to 15 */ - u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 - u8 network_key_index; -#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 -#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 -#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 -#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); - -/* OEM mode commands (direct 0xFE0x) */ -struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - __le16 param_value2; - u8 reserved[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); - -struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); - -/* Initialize OCSD (0xFE02, direct) */ -struct i40e_aqc_opc_oem_ocsd_initialize { - u8 type_status; - u8 reserved1[3]; - __le32 ocsd_memory_block_addr_high; - __le32 ocsd_memory_block_addr_low; - __le32 requested_update_interval; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); - -/* Initialize OCBB (0xFE03, direct) */ -struct i40e_aqc_opc_oem_ocbb_initialize { - u8 type_status; - u8 reserved1[3]; - __le32 ocbb_memory_block_addr_high; - __le32 ocbb_memory_block_addr_low; - u8 reserved2[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); - -/* debug commands */ - -/* get device id (0xFF00) uses the generic structure */ - -/* set test more (0xFF01, internal) */ - -struct i40e_acq_set_test_mode { - u8 mode; 
-#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); - -/* Debug Read Register command (0xFF03) - * Debug Write Register command (0xFF04) - */ -struct i40e_aqc_debug_reg_read_write { - __le32 reserved; - __le32 address; - __le32 value_high; - __le32 value_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); - -/* Scatter/gather Reg Read (indirect 0xFF05) - * Scatter/gather Reg Write (indirect 0xFF06) - */ - -/* i40e_aq_desc is used for the command */ -struct i40e_aqc_debug_reg_sg_element_data { - __le32 address; - __le32 value; -}; - -/* Debug Modify register (direct 0xFF07) */ -struct i40e_aqc_debug_modify_reg { - __le32 address; - __le32 value; - __le32 clear_mask; - __le32 set_mask; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); - -/* dump internal data (0xFF08, indirect) */ - -#define I40E_AQ_CLUSTER_ID_AUX 0 -#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 -#define I40E_AQ_CLUSTER_ID_TXSCHED 2 -#define I40E_AQ_CLUSTER_ID_HMC 3 -#define I40E_AQ_CLUSTER_ID_MAC0 4 -#define I40E_AQ_CLUSTER_ID_MAC1 5 -#define I40E_AQ_CLUSTER_ID_MAC2 6 -#define I40E_AQ_CLUSTER_ID_MAC3 7 -#define I40E_AQ_CLUSTER_ID_DCB 8 -#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 -#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 -#define I40E_AQ_CLUSTER_ID_ALTRAM 11 - -struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); - -struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); - -#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h deleted file mode 100644 index cb8689222c8b..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_ALLOC_H_ -#define _I40E_ALLOC_H_ - -struct i40e_hw; - -/* Memory allocation types */ -enum i40e_memory_type { - i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ - i40e_mem_asq_buf = 1, - i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ - i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ - i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ - i40e_mem_pd = 5, /* Page Descriptor */ - i40e_mem_bp = 6, /* Backing Page - 4KB */ - i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ - i40e_mem_reserved -}; - -/* prototype for functions used for dynamic memory allocation */ -i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, - struct i40e_dma_mem *mem, - enum i40e_memory_type type, - u64 size, u32 alignment); -i40e_status i40e_free_dma_mem(struct i40e_hw *hw, - struct i40e_dma_mem *mem); -i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, - struct i40e_virt_mem *mem, - u32 size); -i40e_status i40e_free_virt_mem(struct i40e_hw *hw, - struct i40e_virt_mem *mem); - -#endif /* _I40E_ALLOC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c deleted file mode 100644 index eea280ba411e..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ /dev/null @@ -1,1320 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#include "i40e_type.h" -#include "i40e_adminq.h" -#include "i40e_prototype.h" -#include <linux/avf/virtchnl.h> - -/** - * i40e_set_mac_type - Sets MAC type - * @hw: pointer to the HW structure - * - * This function sets the mac type of the adapter based on the - * vendor ID and device ID stored in the hw structure. - **/ -i40e_status i40e_set_mac_type(struct i40e_hw *hw) -{ - i40e_status status = 0; - - if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { - switch (hw->device_id) { - case I40E_DEV_ID_SFP_XL710: - case I40E_DEV_ID_QEMU: - case I40E_DEV_ID_KX_B: - case I40E_DEV_ID_KX_C: - case I40E_DEV_ID_QSFP_A: - case I40E_DEV_ID_QSFP_B: - case I40E_DEV_ID_QSFP_C: - case I40E_DEV_ID_10G_BASE_T: - case I40E_DEV_ID_10G_BASE_T4: - case I40E_DEV_ID_20G_KR2: - case I40E_DEV_ID_20G_KR2_A: - case I40E_DEV_ID_25G_B: - case I40E_DEV_ID_25G_SFP28: - hw->mac.type = I40E_MAC_XL710; - break; - case I40E_DEV_ID_SFP_X722: - case I40E_DEV_ID_1G_BASE_T_X722: - case I40E_DEV_ID_10G_BASE_T_X722: - case I40E_DEV_ID_SFP_I_X722: - hw->mac.type = I40E_MAC_X722; - break; - case I40E_DEV_ID_X722_VF: - hw->mac.type = I40E_MAC_X722_VF; - break; - case I40E_DEV_ID_VF: - case I40E_DEV_ID_VF_HV: - case I40E_DEV_ID_ADAPTIVE_VF: - hw->mac.type = I40E_MAC_VF; - break; - default: - hw->mac.type = I40E_MAC_GENERIC; - break; - } - } else { - status = I40E_ERR_DEVICE_NOT_SUPPORTED; - } - - hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", - hw->mac.type, status); - return status; -} - -/** - * i40evf_aq_str - convert AQ err code to a string - * @hw: pointer to the HW structure - * @aq_err: the AQ error code to convert - **/ -const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) -{ - switch (aq_err) { - case I40E_AQ_RC_OK: - return "OK"; - case I40E_AQ_RC_EPERM: - return "I40E_AQ_RC_EPERM"; - case I40E_AQ_RC_ENOENT: - return "I40E_AQ_RC_ENOENT"; - case I40E_AQ_RC_ESRCH: - return "I40E_AQ_RC_ESRCH"; - case I40E_AQ_RC_EINTR: - return "I40E_AQ_RC_EINTR"; - case I40E_AQ_RC_EIO: - return "I40E_AQ_RC_EIO"; - case I40E_AQ_RC_ENXIO: - return "I40E_AQ_RC_ENXIO"; - case I40E_AQ_RC_E2BIG: - return "I40E_AQ_RC_E2BIG"; - case I40E_AQ_RC_EAGAIN: - return 
"I40E_AQ_RC_EAGAIN"; - case I40E_AQ_RC_ENOMEM: - return "I40E_AQ_RC_ENOMEM"; - case I40E_AQ_RC_EACCES: - return "I40E_AQ_RC_EACCES"; - case I40E_AQ_RC_EFAULT: - return "I40E_AQ_RC_EFAULT"; - case I40E_AQ_RC_EBUSY: - return "I40E_AQ_RC_EBUSY"; - case I40E_AQ_RC_EEXIST: - return "I40E_AQ_RC_EEXIST"; - case I40E_AQ_RC_EINVAL: - return "I40E_AQ_RC_EINVAL"; - case I40E_AQ_RC_ENOTTY: - return "I40E_AQ_RC_ENOTTY"; - case I40E_AQ_RC_ENOSPC: - return "I40E_AQ_RC_ENOSPC"; - case I40E_AQ_RC_ENOSYS: - return "I40E_AQ_RC_ENOSYS"; - case I40E_AQ_RC_ERANGE: - return "I40E_AQ_RC_ERANGE"; - case I40E_AQ_RC_EFLUSHED: - return "I40E_AQ_RC_EFLUSHED"; - case I40E_AQ_RC_BAD_ADDR: - return "I40E_AQ_RC_BAD_ADDR"; - case I40E_AQ_RC_EMODE: - return "I40E_AQ_RC_EMODE"; - case I40E_AQ_RC_EFBIG: - return "I40E_AQ_RC_EFBIG"; - } - - snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); - return hw->err_str; -} - -/** - * i40evf_stat_str - convert status err code to a string - * @hw: pointer to the HW structure - * @stat_err: the status error code to convert - **/ -const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) -{ - switch (stat_err) { - case 0: - return "OK"; - case I40E_ERR_NVM: - return "I40E_ERR_NVM"; - case I40E_ERR_NVM_CHECKSUM: - return "I40E_ERR_NVM_CHECKSUM"; - case I40E_ERR_PHY: - return "I40E_ERR_PHY"; - case I40E_ERR_CONFIG: - return "I40E_ERR_CONFIG"; - case I40E_ERR_PARAM: - return "I40E_ERR_PARAM"; - case I40E_ERR_MAC_TYPE: - return "I40E_ERR_MAC_TYPE"; - case I40E_ERR_UNKNOWN_PHY: - return "I40E_ERR_UNKNOWN_PHY"; - case I40E_ERR_LINK_SETUP: - return "I40E_ERR_LINK_SETUP"; - case I40E_ERR_ADAPTER_STOPPED: - return "I40E_ERR_ADAPTER_STOPPED"; - case I40E_ERR_INVALID_MAC_ADDR: - return "I40E_ERR_INVALID_MAC_ADDR"; - case I40E_ERR_DEVICE_NOT_SUPPORTED: - return "I40E_ERR_DEVICE_NOT_SUPPORTED"; - case I40E_ERR_MASTER_REQUESTS_PENDING: - return "I40E_ERR_MASTER_REQUESTS_PENDING"; - case I40E_ERR_INVALID_LINK_SETTINGS: - return "I40E_ERR_INVALID_LINK_SETTINGS"; - case I40E_ERR_AUTONEG_NOT_COMPLETE: - return "I40E_ERR_AUTONEG_NOT_COMPLETE"; - case I40E_ERR_RESET_FAILED: - return "I40E_ERR_RESET_FAILED"; - case I40E_ERR_SWFW_SYNC: - return "I40E_ERR_SWFW_SYNC"; - case I40E_ERR_NO_AVAILABLE_VSI: - return "I40E_ERR_NO_AVAILABLE_VSI"; - case I40E_ERR_NO_MEMORY: - return "I40E_ERR_NO_MEMORY"; - case I40E_ERR_BAD_PTR: - return "I40E_ERR_BAD_PTR"; - case I40E_ERR_RING_FULL: - return "I40E_ERR_RING_FULL"; - case I40E_ERR_INVALID_PD_ID: - return "I40E_ERR_INVALID_PD_ID"; - case I40E_ERR_INVALID_QP_ID: - return "I40E_ERR_INVALID_QP_ID"; - case I40E_ERR_INVALID_CQ_ID: - return "I40E_ERR_INVALID_CQ_ID"; - case I40E_ERR_INVALID_CEQ_ID: - return "I40E_ERR_INVALID_CEQ_ID"; - case I40E_ERR_INVALID_AEQ_ID: - return "I40E_ERR_INVALID_AEQ_ID"; - case I40E_ERR_INVALID_SIZE: - return "I40E_ERR_INVALID_SIZE"; - case I40E_ERR_INVALID_ARP_INDEX: - return "I40E_ERR_INVALID_ARP_INDEX"; - case I40E_ERR_INVALID_FPM_FUNC_ID: - return "I40E_ERR_INVALID_FPM_FUNC_ID"; - case I40E_ERR_QP_INVALID_MSG_SIZE: - return "I40E_ERR_QP_INVALID_MSG_SIZE"; - case I40E_ERR_QP_TOOMANY_WRS_POSTED: - return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; - case I40E_ERR_INVALID_FRAG_COUNT: - return "I40E_ERR_INVALID_FRAG_COUNT"; - case I40E_ERR_QUEUE_EMPTY: - return "I40E_ERR_QUEUE_EMPTY"; - case I40E_ERR_INVALID_ALIGNMENT: - return "I40E_ERR_INVALID_ALIGNMENT"; - case I40E_ERR_FLUSHED_QUEUE: - return "I40E_ERR_FLUSHED_QUEUE"; - case I40E_ERR_INVALID_PUSH_PAGE_INDEX: - return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; - case 
I40E_ERR_INVALID_IMM_DATA_SIZE: - return "I40E_ERR_INVALID_IMM_DATA_SIZE"; - case I40E_ERR_TIMEOUT: - return "I40E_ERR_TIMEOUT"; - case I40E_ERR_OPCODE_MISMATCH: - return "I40E_ERR_OPCODE_MISMATCH"; - case I40E_ERR_CQP_COMPL_ERROR: - return "I40E_ERR_CQP_COMPL_ERROR"; - case I40E_ERR_INVALID_VF_ID: - return "I40E_ERR_INVALID_VF_ID"; - case I40E_ERR_INVALID_HMCFN_ID: - return "I40E_ERR_INVALID_HMCFN_ID"; - case I40E_ERR_BACKING_PAGE_ERROR: - return "I40E_ERR_BACKING_PAGE_ERROR"; - case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: - return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; - case I40E_ERR_INVALID_PBLE_INDEX: - return "I40E_ERR_INVALID_PBLE_INDEX"; - case I40E_ERR_INVALID_SD_INDEX: - return "I40E_ERR_INVALID_SD_INDEX"; - case I40E_ERR_INVALID_PAGE_DESC_INDEX: - return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; - case I40E_ERR_INVALID_SD_TYPE: - return "I40E_ERR_INVALID_SD_TYPE"; - case I40E_ERR_MEMCPY_FAILED: - return "I40E_ERR_MEMCPY_FAILED"; - case I40E_ERR_INVALID_HMC_OBJ_INDEX: - return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; - case I40E_ERR_INVALID_HMC_OBJ_COUNT: - return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; - case I40E_ERR_INVALID_SRQ_ARM_LIMIT: - return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; - case I40E_ERR_SRQ_ENABLED: - return "I40E_ERR_SRQ_ENABLED"; - case I40E_ERR_ADMIN_QUEUE_ERROR: - return "I40E_ERR_ADMIN_QUEUE_ERROR"; - case I40E_ERR_ADMIN_QUEUE_TIMEOUT: - return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; - case I40E_ERR_BUF_TOO_SHORT: - return "I40E_ERR_BUF_TOO_SHORT"; - case I40E_ERR_ADMIN_QUEUE_FULL: - return "I40E_ERR_ADMIN_QUEUE_FULL"; - case I40E_ERR_ADMIN_QUEUE_NO_WORK: - return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; - case I40E_ERR_BAD_IWARP_CQE: - return "I40E_ERR_BAD_IWARP_CQE"; - case I40E_ERR_NVM_BLANK_MODE: - return "I40E_ERR_NVM_BLANK_MODE"; - case I40E_ERR_NOT_IMPLEMENTED: - return "I40E_ERR_NOT_IMPLEMENTED"; - case I40E_ERR_PE_DOORBELL_NOT_ENABLED: - return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; - case I40E_ERR_DIAG_TEST_FAILED: - return "I40E_ERR_DIAG_TEST_FAILED"; - case I40E_ERR_NOT_READY: - return "I40E_ERR_NOT_READY"; - case I40E_NOT_SUPPORTED: - return "I40E_NOT_SUPPORTED"; - case I40E_ERR_FIRMWARE_API_VERSION: - return "I40E_ERR_FIRMWARE_API_VERSION"; - case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: - return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; - } - - snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); - return hw->err_str; -} - -/** - * i40evf_debug_aq - * @hw: debug mask related to admin queue - * @mask: debug mask - * @desc: pointer to admin queue descriptor - * @buffer: pointer to command buffer - * @buf_len: max length of buffer - * - * Dumps debug log about adminq command with descriptor contents. 
- **/ -void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, - void *buffer, u16 buf_len) -{ - struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; - u8 *buf = (u8 *)buffer; - - if ((!(mask & hw->debug_mask)) || (desc == NULL)) - return; - - i40e_debug(hw, mask, - "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - le16_to_cpu(aq_desc->opcode), - le16_to_cpu(aq_desc->flags), - le16_to_cpu(aq_desc->datalen), - le16_to_cpu(aq_desc->retval)); - i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->cookie_high), - le32_to_cpu(aq_desc->cookie_low)); - i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->params.internal.param0), - le32_to_cpu(aq_desc->params.internal.param1)); - i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->params.external.addr_high), - le32_to_cpu(aq_desc->params.external.addr_low)); - - if ((buffer != NULL) && (aq_desc->datalen != 0)) { - u16 len = le16_to_cpu(aq_desc->datalen); - - i40e_debug(hw, mask, "AQ CMD Buffer:\n"); - if (buf_len < len) - len = buf_len; - /* write the full 16-byte chunks */ - if (hw->debug_mask & mask) { - char prefix[27]; - - snprintf(prefix, sizeof(prefix), - "i40evf %02x:%02x.%x: \t0x", - hw->bus.bus_id, - hw->bus.device, - hw->bus.func); - - print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, - 16, 1, buf, len, false); - } - } -} - -/** - * i40evf_check_asq_alive - * @hw: pointer to the hw struct - * - * Returns true if Queue is enabled else false. - **/ -bool i40evf_check_asq_alive(struct i40e_hw *hw) -{ - if (hw->aq.asq.len) - return !!(rd32(hw, hw->aq.asq.len) & - I40E_VF_ATQLEN1_ATQENABLE_MASK); - else - return false; -} - -/** - * i40evf_aq_queue_shutdown - * @hw: pointer to the hw struct - * @unloading: is the driver unloading itself - * - * Tell the Firmware that we're shutting down the AdminQ and whether - * or not the driver is unloading as well. 
- **/ -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_queue_shutdown *cmd = - (struct i40e_aqc_queue_shutdown *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_queue_shutdown); - - if (unloading) - cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); - status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL); - - return status; -} - -/** - * i40e_aq_get_set_rss_lut - * @hw: pointer to the hardware structure - * @vsi_id: vsi fw index - * @pf_lut: for PF table set true, for VSI table set false - * @lut: pointer to the lut buffer provided by the caller - * @lut_size: size of the lut buffer - * @set: set true to set the table, false to get the table - * - * Internal function to get or set RSS look up table - **/ -static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, - u16 vsi_id, bool pf_lut, - u8 *lut, u16 lut_size, - bool set) -{ - i40e_status status; - struct i40e_aq_desc desc; - struct i40e_aqc_get_set_rss_lut *cmd_resp = - (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; - - if (set) - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_rss_lut); - else - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_rss_lut); - - /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); - - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); - - if (pf_lut) - cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - else - cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - - status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); - - return status; -} - -/** - * i40evf_aq_get_rss_lut - * @hw: pointer to the hardware structure - * @vsi_id: vsi fw index - * @pf_lut: for PF table set true, for VSI table set false - * @lut: pointer to the lut buffer provided by the caller - * @lut_size: size of the lut buffer - * - * get the RSS lookup table, PF or VSI type - **/ -i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) -{ - return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, - false); -} - -/** - * i40evf_aq_set_rss_lut - * @hw: pointer to the hardware structure - * @vsi_id: vsi fw index - * @pf_lut: for PF table set true, for VSI table set false - * @lut: pointer to the lut buffer provided by the caller - * @lut_size: size of the lut buffer - * - * set the RSS lookup table, PF or VSI type - **/ -i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) -{ - return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); -} - -/** - * i40e_aq_get_set_rss_key - * @hw: pointer to the hw struct - * @vsi_id: vsi fw index - * @key: pointer to key info struct - * @set: set true to set the key, false to get the key - * - * get the RSS key per VSI - **/ -static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key, - bool set) -{ - i40e_status status; - struct i40e_aq_desc desc; - struct i40e_aqc_get_set_rss_key *cmd_resp = 
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; - u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); - - if (set) - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_rss_key); - else - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_rss_key); - - /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); - - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); - - status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); - - return status; -} - -/** - * i40evf_aq_get_rss_key - * @hw: pointer to the hw struct - * @vsi_id: vsi fw index - * @key: pointer to key info struct - * - **/ -i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) -{ - return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); -} - -/** - * i40evf_aq_set_rss_key - * @hw: pointer to the hw struct - * @vsi_id: vsi fw index - * @key: pointer to key info struct - * - * set the RSS key per VSI - **/ -i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) -{ - return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); -} - - -/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. 
- * - * Typical work flow: - * - * IF NOT i40evf_ptype_lookup[ptype].known - * THEN - * Packet is unknown - * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum i40e_rx_l2_ptype to decode the packet type - * ENDIF - */ - -/* macro to make the table lines short */ -#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - { PTYPE, \ - 1, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - I40E_RX_PTYPE_##OUTER_FRAG, \ - I40E_RX_PTYPE_TUNNEL_##T, \ - I40E_RX_PTYPE_TUNNEL_END_##TE, \ - I40E_RX_PTYPE_##TEF, \ - I40E_RX_PTYPE_INNER_PROT_##I, \ - I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ - { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG -#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG -#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC - -/* Lookup table mapping the HW PTYPE to the bit field for decoding */ -struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { - /* L2 Packet types */ - I40E_PTT_UNUSED_ENTRY(0), - I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), - I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(4), - I40E_PTT_UNUSED_ENTRY(5), - I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(8), - I40E_PTT_UNUSED_ENTRY(9), - I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - - /* Non Tunneled IPv4 */ - I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(25), - I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(32), - I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(39), - I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 
- I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(47), - I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(54), - I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(62), - I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(69), - I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(77), - I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(84), - I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), - I40E_PTT_UNUSED_ENTRY(91), - I40E_PTT(92, IP, IPV6, NOF, 
NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(98), - I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(105), - I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(113), - I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(120), - I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(128), - I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(135), - I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(143), - I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, 
IPV4, NOF, TCP, PAY4), - I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(150), - I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* unused entries */ - I40E_PTT_UNUSED_ENTRY(154), - I40E_PTT_UNUSED_ENTRY(155), - I40E_PTT_UNUSED_ENTRY(156), - I40E_PTT_UNUSED_ENTRY(157), - I40E_PTT_UNUSED_ENTRY(158), - I40E_PTT_UNUSED_ENTRY(159), - - I40E_PTT_UNUSED_ENTRY(160), - I40E_PTT_UNUSED_ENTRY(161), - I40E_PTT_UNUSED_ENTRY(162), - I40E_PTT_UNUSED_ENTRY(163), - I40E_PTT_UNUSED_ENTRY(164), - I40E_PTT_UNUSED_ENTRY(165), - I40E_PTT_UNUSED_ENTRY(166), - I40E_PTT_UNUSED_ENTRY(167), - I40E_PTT_UNUSED_ENTRY(168), - I40E_PTT_UNUSED_ENTRY(169), - - I40E_PTT_UNUSED_ENTRY(170), - I40E_PTT_UNUSED_ENTRY(171), - I40E_PTT_UNUSED_ENTRY(172), - I40E_PTT_UNUSED_ENTRY(173), - I40E_PTT_UNUSED_ENTRY(174), - I40E_PTT_UNUSED_ENTRY(175), - I40E_PTT_UNUSED_ENTRY(176), - I40E_PTT_UNUSED_ENTRY(177), - I40E_PTT_UNUSED_ENTRY(178), - I40E_PTT_UNUSED_ENTRY(179), - - I40E_PTT_UNUSED_ENTRY(180), - I40E_PTT_UNUSED_ENTRY(181), - I40E_PTT_UNUSED_ENTRY(182), - I40E_PTT_UNUSED_ENTRY(183), - I40E_PTT_UNUSED_ENTRY(184), - I40E_PTT_UNUSED_ENTRY(185), - I40E_PTT_UNUSED_ENTRY(186), - I40E_PTT_UNUSED_ENTRY(187), - I40E_PTT_UNUSED_ENTRY(188), - I40E_PTT_UNUSED_ENTRY(189), - - I40E_PTT_UNUSED_ENTRY(190), - I40E_PTT_UNUSED_ENTRY(191), - I40E_PTT_UNUSED_ENTRY(192), - I40E_PTT_UNUSED_ENTRY(193), - I40E_PTT_UNUSED_ENTRY(194), - I40E_PTT_UNUSED_ENTRY(195), - I40E_PTT_UNUSED_ENTRY(196), - I40E_PTT_UNUSED_ENTRY(197), - I40E_PTT_UNUSED_ENTRY(198), - I40E_PTT_UNUSED_ENTRY(199), - - I40E_PTT_UNUSED_ENTRY(200), - I40E_PTT_UNUSED_ENTRY(201), - I40E_PTT_UNUSED_ENTRY(202), - I40E_PTT_UNUSED_ENTRY(203), - I40E_PTT_UNUSED_ENTRY(204), - I40E_PTT_UNUSED_ENTRY(205), - I40E_PTT_UNUSED_ENTRY(206), - I40E_PTT_UNUSED_ENTRY(207), - I40E_PTT_UNUSED_ENTRY(208), - I40E_PTT_UNUSED_ENTRY(209), - - I40E_PTT_UNUSED_ENTRY(210), - I40E_PTT_UNUSED_ENTRY(211), - I40E_PTT_UNUSED_ENTRY(212), - I40E_PTT_UNUSED_ENTRY(213), - I40E_PTT_UNUSED_ENTRY(214), - I40E_PTT_UNUSED_ENTRY(215), - I40E_PTT_UNUSED_ENTRY(216), - I40E_PTT_UNUSED_ENTRY(217), - I40E_PTT_UNUSED_ENTRY(218), - I40E_PTT_UNUSED_ENTRY(219), - - I40E_PTT_UNUSED_ENTRY(220), - I40E_PTT_UNUSED_ENTRY(221), - I40E_PTT_UNUSED_ENTRY(222), - I40E_PTT_UNUSED_ENTRY(223), - I40E_PTT_UNUSED_ENTRY(224), - I40E_PTT_UNUSED_ENTRY(225), - I40E_PTT_UNUSED_ENTRY(226), - I40E_PTT_UNUSED_ENTRY(227), - I40E_PTT_UNUSED_ENTRY(228), - I40E_PTT_UNUSED_ENTRY(229), - - I40E_PTT_UNUSED_ENTRY(230), - I40E_PTT_UNUSED_ENTRY(231), - I40E_PTT_UNUSED_ENTRY(232), - I40E_PTT_UNUSED_ENTRY(233), - I40E_PTT_UNUSED_ENTRY(234), - I40E_PTT_UNUSED_ENTRY(235), - I40E_PTT_UNUSED_ENTRY(236), - I40E_PTT_UNUSED_ENTRY(237), - I40E_PTT_UNUSED_ENTRY(238), - I40E_PTT_UNUSED_ENTRY(239), - - I40E_PTT_UNUSED_ENTRY(240), - I40E_PTT_UNUSED_ENTRY(241), - I40E_PTT_UNUSED_ENTRY(242), - I40E_PTT_UNUSED_ENTRY(243), - I40E_PTT_UNUSED_ENTRY(244), - I40E_PTT_UNUSED_ENTRY(245), - I40E_PTT_UNUSED_ENTRY(246), - 
I40E_PTT_UNUSED_ENTRY(247), - I40E_PTT_UNUSED_ENTRY(248), - I40E_PTT_UNUSED_ENTRY(249), - - I40E_PTT_UNUSED_ENTRY(250), - I40E_PTT_UNUSED_ENTRY(251), - I40E_PTT_UNUSED_ENTRY(252), - I40E_PTT_UNUSED_ENTRY(253), - I40E_PTT_UNUSED_ENTRY(254), - I40E_PTT_UNUSED_ENTRY(255) -}; - -/** - * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: ptr to register value - * @cmd_details: pointer to command details structure or NULL - * - * Use the firmware to read the Rx control register, - * especially useful if the Rx unit is under heavy pressure - **/ -i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = - (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; - - if (!reg_val) - return I40E_ERR_PARAM; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_rx_ctl_reg_read); - - cmd_resp->address = cpu_to_le32(reg_addr); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - if (status == 0) - *reg_val = le32_to_cpu(cmd_resp->value); - - return status; -} - -/** - * i40evf_read_rx_ctl - read from an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - **/ -u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) -{ - i40e_status status = 0; - bool use_register; - int retry = 5; - u32 val = 0; - - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); - if (!use_register) { -do_retry: - status = i40evf_aq_rx_ctl_read_register(hw, reg_addr, - &val, NULL); - if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { - usleep_range(1000, 2000); - retry--; - goto do_retry; - } - } - - /* if the AQ access failed, try the old-fashioned way */ - if (status || use_register) - val = rd32(hw, reg_addr); - - return val; -} - -/** - * i40evf_aq_rx_ctl_write_register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: register value - * @cmd_details: pointer to command details structure or NULL - * - * Use the firmware to write to an Rx control register, - * especially useful if the Rx unit is under heavy pressure - **/ -i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_rx_ctl_reg_read_write *cmd = - (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_rx_ctl_reg_write); - - cmd->address = cpu_to_le32(reg_addr); - cmd->value = cpu_to_le32(reg_val); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - -/** - * i40evf_write_rx_ctl - write to an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: register value - **/ -void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) -{ - i40e_status status = 0; - bool use_register; - int retry = 5; - - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); - if (!use_register) { -do_retry: - status = i40evf_aq_rx_ctl_write_register(hw, reg_addr, - reg_val, NULL); - if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { - usleep_range(1000, 2000); - 
retry--; - goto do_retry; - } - } - - /* if the AQ access failed, try the old-fashioned way */ - if (status || use_register) - wr32(hw, reg_addr, reg_val); -} - -/** - * i40e_aq_send_msg_to_pf - * @hw: pointer to the hardware structure - * @v_opcode: opcodes for VF-PF communication - * @v_retval: return error code - * @msg: pointer to the msg buffer - * @msglen: msg length - * @cmd_details: pointer to command details - * - * Send message to PF driver using admin queue. By default, this message - * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for - * completion before returning. - **/ -i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_asq_cmd_details details; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); - desc.cookie_high = cpu_to_le32(v_opcode); - desc.cookie_low = cpu_to_le32(v_retval); - if (msglen) { - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF - | I40E_AQ_FLAG_RD)); - if (msglen > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(msglen); - } - if (!cmd_details) { - memset(&details, 0, sizeof(details)); - details.async = true; - cmd_details = &details; - } - status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details); - return status; -} - -/** - * i40e_vf_parse_hw_config - * @hw: pointer to the hardware structure - * @msg: pointer to the virtual channel VF resource structure - * - * Given a VF resource message from the PF, populate the hw struct - * with appropriate information. - **/ -void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct virtchnl_vf_resource *msg) -{ - struct virtchnl_vsi_resource *vsi_res; - int i; - - vsi_res = &msg->vsi_res[0]; - - hw->dev_caps.num_vsis = msg->num_vsis; - hw->dev_caps.num_rx_qp = msg->num_queue_pairs; - hw->dev_caps.num_tx_qp = msg->num_queue_pairs; - hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; - hw->dev_caps.dcb = msg->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_L2; - hw->dev_caps.fcoe = 0; - for (i = 0; i < msg->num_vsis; i++) { - if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { - ether_addr_copy(hw->mac.perm_addr, - vsi_res->default_mac_addr); - ether_addr_copy(hw->mac.addr, - vsi_res->default_mac_addr); - } - vsi_res++; - } -} - -/** - * i40e_vf_reset - * @hw: pointer to the hardware structure - * - * Send a VF_RESET message to the PF. Does not wait for response from PF - * as none will be forthcoming. Immediately after calling this function, - * the admin queue should be shut down and (optionally) reinitialized. 
- **/ -i40e_status i40e_vf_reset(struct i40e_hw *hw) -{ - return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, - 0, NULL, 0, NULL); -} - -/** - * i40evf_aq_write_ddp - Write dynamic device personalization (ddp) - * @hw: pointer to the hw struct - * @buff: command buffer (size in bytes = buff_size) - * @buff_size: buffer size in bytes - * @track_id: package tracking id - * @error_offset: returns error offset - * @error_info: returns error information - * @cmd_details: pointer to command details structure or NULL - **/ -enum -i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_write_personalization_profile *cmd = - (struct i40e_aqc_write_personalization_profile *) - &desc.params.raw; - struct i40e_aqc_write_ddp_resp *resp; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_write_personalization_profile); - - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - - desc.datalen = cpu_to_le16(buff_size); - - cmd->profile_track_id = cpu_to_le32(track_id); - - status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - if (!status) { - resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp) - * @hw: pointer to the hw struct - * @buff: command buffer (size in bytes = buff_size) - * @buff_size: buffer size in bytes - * @flags: AdminQ command flags - * @cmd_details: pointer to command details structure or NULL - **/ -enum -i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_get_applied_profiles *cmd = - (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_personalization_profile_list); - - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(buff_size); - - cmd->flags = flags; - - status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - - return status; -} - -/** - * i40evf_find_segment_in_package - * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. 
- **/ -struct i40e_generic_seg_header * -i40evf_find_segment_in_package(u32 segment_type, - struct i40e_package_header *pkg_hdr) -{ - struct i40e_generic_seg_header *segment; - u32 i; - - /* Search all package segments for the requested segment type */ - for (i = 0; i < pkg_hdr->segment_count; i++) { - segment = - (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + - pkg_hdr->segment_offset[i]); - - if (segment->type == segment_type) - return segment; - } - - return NULL; -} - -/** - * i40evf_write_profile - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package to be downloaded - * @track_id: package tracking id - * - * Handles the download of a complete package. - */ -enum i40e_status_code -i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, - u32 track_id) -{ - i40e_status status = 0; - struct i40e_section_table *sec_tbl; - struct i40e_profile_section_header *sec = NULL; - u32 dev_cnt; - u32 vendor_dev_id; - u32 *nvm; - u32 section_size = 0; - u32 offset = 0, info = 0; - u32 i; - - dev_cnt = profile->device_table_count; - - for (i = 0; i < dev_cnt; i++) { - vendor_dev_id = profile->device_table[i].vendor_dev_id; - if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL) - if (hw->device_id == (vendor_dev_id & 0xFFFF)) - break; - } - if (i == dev_cnt) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); - return I40E_ERR_DEVICE_NOT_SUPPORTED; - } - - nvm = (u32 *)&profile->device_table[dev_cnt]; - sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; - - for (i = 0; i < sec_tbl->section_count; i++) { - sec = (struct i40e_profile_section_header *)((u8 *)profile + - sec_tbl->section_offset[i]); - - /* Skip 'AQ', 'note' and 'name' sections */ - if (sec->section.type != SECTION_TYPE_MMIO) - continue; - - section_size = sec->section.size + - sizeof(struct i40e_profile_section_header); - - /* Write profile */ - status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size, - track_id, &offset, &info, NULL); - if (status) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, - "Failed to write profile: offset %d, info %d", - offset, info); - break; - } - } - return status; -} - -/** - * i40evf_add_pinfo_to_list - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package - * @profile_info_sec: buffer for information section - * @track_id: package tracking id - * - * Register a profile to the list of loaded profiles. 
- */ -enum i40e_status_code -i40evf_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id) -{ - i40e_status status = 0; - struct i40e_profile_section_header *sec = NULL; - struct i40e_profile_info *pinfo; - u32 offset = 0, info = 0; - - sec = (struct i40e_profile_section_header *)profile_info_sec; - sec->tbl_size = 1; - sec->data_end = sizeof(struct i40e_profile_section_header) + - sizeof(struct i40e_profile_info); - sec->section.type = SECTION_TYPE_INFO; - sec->section.offset = sizeof(struct i40e_profile_section_header); - sec->section.size = sizeof(struct i40e_profile_info); - pinfo = (struct i40e_profile_info *)(profile_info_sec + - sec->section.offset); - pinfo->track_id = track_id; - pinfo->version = profile->version; - pinfo->op = I40E_DDP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - - status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end, - track_id, &offset, &info, NULL); - return status; -} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h deleted file mode 100644 index f300bf271824..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_DEVIDS_H_ -#define _I40E_DEVIDS_H_ - -/* Device IDs */ -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_20G_KR2_A 0x1588 -#define I40E_DEV_ID_10G_BASE_T4 0x1589 -#define I40E_DEV_ID_25G_B 0x158A -#define I40E_DEV_ID_25G_SFP28 0x158B -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 -#define I40E_DEV_ID_ADAPTIVE_VF 0x1889 -#define I40E_DEV_ID_SFP_X722 0x37D0 -#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 -#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define I40E_DEV_ID_X722_VF 0x37CD - -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) - -#endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h deleted file mode 100644 index 1c78de838857..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ /dev/null @@ -1,215 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_HMC_H_ -#define _I40E_HMC_H_ - -#define I40E_HMC_MAX_BP_COUNT 512 - -/* forward-declare the HW struct for the compiler */ -struct i40e_hw; - -#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ -#define I40E_HMC_PD_CNT_IN_SD 512 -#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ -#define I40E_HMC_PAGED_BP_SIZE 4096 -#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 -#define I40E_FIRST_VF_FPM_ID 16 - -struct i40e_hmc_obj_info { - u64 base; /* base addr in FPM */ - u32 max_cnt; /* max count available for this hmc func */ - u32 cnt; /* count of objects driver actually wants to create */ - u64 size; /* size in bytes of one object */ -}; - -enum i40e_sd_entry_type { - I40E_SD_TYPE_INVALID = 0, - I40E_SD_TYPE_PAGED = 1, - I40E_SD_TYPE_DIRECT = 2 -}; - -struct i40e_hmc_bp { - enum i40e_sd_entry_type entry_type; - struct i40e_dma_mem addr; /* populate to be used by hw */ - u32 sd_pd_index; - u32 ref_cnt; -}; - -struct i40e_hmc_pd_entry { - struct i40e_hmc_bp bp; - u32 sd_index; - bool rsrc_pg; - bool valid; -}; - -struct i40e_hmc_pd_table { - struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ - struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ - struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ - - u32 ref_cnt; - u32 sd_index; -}; - -struct i40e_hmc_sd_entry { - enum i40e_sd_entry_type entry_type; - bool valid; - - union { - struct i40e_hmc_pd_table pd_table; - struct i40e_hmc_bp bp; - } u; -}; - -struct i40e_hmc_sd_table { - struct i40e_virt_mem addr; /* used to track sd_entry allocations */ - u32 sd_cnt; - u32 ref_cnt; - struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ -}; - -struct i40e_hmc_info { - u32 signature; - /* equals to pci func num for PF and dynamically allocated for VFs */ - u8 hmc_fn_id; - u16 first_sd_index; /* index of the first available SD */ - - /* hmc objects */ - struct i40e_hmc_obj_info *hmc_obj; - struct i40e_virt_mem hmc_obj_virt_mem; - struct i40e_hmc_sd_table sd_table; -}; - -#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) -#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) -#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) - -#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) -#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) -#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) - -/** - * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware - * @hw: pointer to our hw struct - * @pa: pointer to physical address - * @sd_index: segment descriptor index - * @type: if sd entry is direct or paged - **/ -#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ -{ \ - u32 val1, val2, val3; \ - val1 = (u32)(upper_32_bits(pa)); \ - val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ - I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ - ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ - I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ - BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ - val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ - wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ - wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ - wr32((hw), I40E_PFHMC_SDCMD, val3); \ -} - -/** - * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware - * @hw: pointer to our hw struct - * @sd_index: segment descriptor index - * @type: if sd entry is direct or paged - **/ -#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ -{ \ - u32 val2, val3; \ - val2 = (I40E_HMC_MAX_BP_COUNT << \ - I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ - ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ - I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ - val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ - wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ - wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ - wr32((hw), I40E_PFHMC_SDCMD, val3); \ -} - -/** - * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware - * @hw: pointer to our hw struct - * @sd_idx: segment descriptor index - * @pd_idx: page descriptor index - **/ -#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ - wr32((hw), I40E_PFHMC_PDINV, \ - (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ - ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) - -/** - * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit - * @hmc_info: pointer to the HMC configuration information structure - * @type: type of HMC resources we're searching - * @index: starting index for the object - * @cnt: number of objects we're trying to create - * @sd_idx: pointer to return index of the segment descriptor in question - * @sd_limit: pointer to return the maximum number of segment descriptors - * - * This function calculates the segment descriptor index and index limit - * for the resource defined by i40e_hmc_rsrc_type. - **/ -#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ -{ \ - u64 fpm_addr, fpm_limit; \ - fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ - (hmc_info)->hmc_obj[(type)].size * (index); \ - fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ - *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ - *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ - /* add one more to the limit to correct our range */ \ - *(sd_limit) += 1; \ -} - -/** - * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit - * @hmc_info: pointer to the HMC configuration information struct - * @type: HMC resource type we're examining - * @idx: starting index for the object - * @cnt: number of objects we're trying to create - * @pd_index: pointer to return page descriptor index - * @pd_limit: pointer to return page descriptor index limit - * - * Calculates the page descriptor index and index limit for the resource - * defined by i40e_hmc_rsrc_type. 
- **/ -#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ -{ \ - u64 fpm_adr, fpm_limit; \ - fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ - (hmc_info)->hmc_obj[(type)].size * (idx); \ - fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ - *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ - *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ - /* add one more to the limit to correct our range */ \ - *(pd_limit) += 1; \ -} -i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 sd_index, - enum i40e_sd_entry_type type, - u64 direct_mode_sz); - -i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 pd_index, - struct i40e_dma_mem *rsrc_pg); -i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); -i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); - -#endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h deleted file mode 100644 index 82b00f70a632..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h +++ /dev/null @@ -1,158 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_LAN_HMC_H_ -#define _I40E_LAN_HMC_H_ - -/* forward-declare the HW struct for the compiler */ -struct i40e_hw; - -/* HMC element context information */ - -/* Rx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ -struct i40e_hmc_obj_rxq { - u16 head; - u16 cpuid; /* bigger than needed, see above for reason */ - u64 base; - u16 qlen; -#define I40E_RXQ_CTX_DBUFF_SHIFT 7 - u16 dbuff; /* bigger than needed, see above for reason */ -#define I40E_RXQ_CTX_HBUFF_SHIFT 6 - u16 hbuff; /* bigger than needed, see above for reason */ - u8 dtype; - u8 dsize; - u8 crcstrip; - u8 fc_ena; - u8 l2tsel; - u8 hsplit_0; - u8 hsplit_1; - u8 showiv; - u32 rxmax; /* bigger than needed, see above for reason */ - u8 tphrdesc_ena; - u8 tphwdesc_ena; - u8 tphdata_ena; - u8 tphhead_ena; - u16 lrxqthresh; /* bigger than needed, see above for reason */ - u8 prefena; /* NOTE: normally must be set to 1 at init */ -}; - -/* Tx queue context data -* -* The sizes of the variables may be larger than needed due to crossing byte -* boundaries. If we do not have the width of the variable set to the correct -* size then we could end up shifting bits off the top of the variable when the -* variable is at the top of a byte and crosses over into the next byte. 
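The two *_INDEX_LIMIT macros above are pure address arithmetic: an object range in FPM space is mapped onto 2 MB segment-descriptor pages or 4 KB page-descriptor pages, with an exclusive upper limit. A standalone sketch of the same calculation, reusing only the backing-page sizes defined in this header; the example object size and count are made up:

#include <stdint.h>
#include <stdio.h>

#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M backing pages (direct mode) */
#define I40E_HMC_PAGED_BP_SIZE  4096     /* 4K backing pages (paged mode) */

/* Same arithmetic as I40E_FIND_SD_INDEX_LIMIT / I40E_FIND_PD_INDEX_LIMIT:
 * map an object range in FPM space onto descriptor indices.
 */
static void find_index_limit(uint64_t obj_base, uint64_t obj_size,
			     uint32_t index, uint32_t cnt, uint64_t bp_size,
			     uint32_t *idx, uint32_t *limit)
{
	uint64_t fpm_addr = obj_base + obj_size * index;
	uint64_t fpm_limit = fpm_addr + obj_size * cnt;

	*idx = (uint32_t)(fpm_addr / bp_size);
	/* +1 so the limit is exclusive, as in the original macros */
	*limit = (uint32_t)((fpm_limit - 1) / bp_size) + 1;
}

int main(void)
{
	uint32_t sd_idx, sd_limit, pd_idx, pd_limit;

	/* hypothetical object table: 512 contexts of 128 bytes at FPM base 0 */
	find_index_limit(0, 128, 0, 512, I40E_HMC_DIRECT_BP_SIZE, &sd_idx, &sd_limit);
	find_index_limit(0, 128, 0, 512, I40E_HMC_PAGED_BP_SIZE, &pd_idx, &pd_limit);
	printf("SD %u..%u, PD %u..%u\n", sd_idx, sd_limit, pd_idx, pd_limit);
	return 0;
}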
-*/ -struct i40e_hmc_obj_txq { - u16 head; - u8 new_context; - u64 base; - u8 fc_ena; - u8 timesync_ena; - u8 fd_ena; - u8 alt_vlan_ena; - u16 thead_wb; - u8 cpuid; - u8 head_wb_ena; - u16 qlen; - u8 tphrdesc_ena; - u8 tphrpacket_ena; - u8 tphwdesc_ena; - u64 head_wb_addr; - u32 crc; - u16 rdylist; - u8 rdylist_act; -}; - -/* for hsplit_0 field of Rx HMC context */ -enum i40e_hmc_obj_rx_hsplit_0 { - I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, -}; - -/* fcoe_cntx and fcoe_filt are for debugging purpose only */ -struct i40e_hmc_obj_fcoe_cntx { - u32 rsv[32]; -}; - -struct i40e_hmc_obj_fcoe_filt { - u32 rsv[8]; -}; - -/* Context sizes for LAN objects */ -enum i40e_hmc_lan_object_size { - I40E_HMC_LAN_OBJ_SZ_8 = 0x3, - I40E_HMC_LAN_OBJ_SZ_16 = 0x4, - I40E_HMC_LAN_OBJ_SZ_32 = 0x5, - I40E_HMC_LAN_OBJ_SZ_64 = 0x6, - I40E_HMC_LAN_OBJ_SZ_128 = 0x7, - I40E_HMC_LAN_OBJ_SZ_256 = 0x8, - I40E_HMC_LAN_OBJ_SZ_512 = 0x9, -}; - -#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 -#define I40E_HMC_OBJ_SIZE_TXQ 128 -#define I40E_HMC_OBJ_SIZE_RXQ 32 -#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128 -#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 - -enum i40e_hmc_lan_rsrc_type { - I40E_HMC_LAN_FULL = 0, - I40E_HMC_LAN_TX = 1, - I40E_HMC_LAN_RX = 2, - I40E_HMC_FCOE_CTX = 3, - I40E_HMC_FCOE_FILT = 4, - I40E_HMC_LAN_MAX = 5 -}; - -enum i40e_hmc_model { - I40E_HMC_MODEL_DIRECT_PREFERRED = 0, - I40E_HMC_MODEL_DIRECT_ONLY = 1, - I40E_HMC_MODEL_PAGED_ONLY = 2, - I40E_HMC_MODEL_UNKNOWN, -}; - -struct i40e_hmc_lan_create_obj_info { - struct i40e_hmc_info *hmc_info; - u32 rsrc_type; - u32 start_idx; - u32 count; - enum i40e_sd_entry_type entry_type; - u64 direct_mode_sz; -}; - -struct i40e_hmc_lan_delete_obj_info { - struct i40e_hmc_info *hmc_info; - u32 rsrc_type; - u32 start_idx; - u32 count; -}; - -i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, - u32 rxq_num, u32 fcoe_cntx_num, - u32 fcoe_filt_num); -i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, - enum i40e_hmc_model model); -i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); - -i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_txq *s); -i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_rxq *s); - -#endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h deleted file mode 100644 index a358f4b9d5aa..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ /dev/null @@ -1,130 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_PROTOTYPE_H_ -#define _I40E_PROTOTYPE_H_ - -#include "i40e_type.h" -#include "i40e_alloc.h" -#include <linux/avf/virtchnl.h> - -/* Prototypes for shared code functions that are not in - * the standard function pointer structures. These are - * mostly because they are needed even before the init - * has happened and will assist in the early SW and FW - * setup. 
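For a feel of the sizes involved: the LAN HMC constants above (128-byte Tx and 32-byte Rx queue contexts, 512-byte object-base alignment) imply roughly the backing-store arithmetic below. This is only the raw size calculation suggested by those constants; the real i40e_init_lan_hmc()/i40e_configure_lan_hmc() do considerably more (capability checks, SD/PD setup, per-object layout):

#include <stdint.h>
#include <stdio.h>

#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32

static uint64_t align_up(uint64_t v, uint64_t a)
{
	return (v + a - 1) / a * a;
}

/* Rough FPM footprint of txq_num Tx and rxq_num Rx queue contexts, each
 * object table starting on a 512-byte aligned base (illustrative only).
 */
static uint64_t lan_hmc_bytes(uint32_t txq_num, uint32_t rxq_num)
{
	uint64_t end = 0;

	end = align_up(end, I40E_HMC_L2OBJ_BASE_ALIGNMENT) +
	      (uint64_t)txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	end = align_up(end, I40E_HMC_L2OBJ_BASE_ALIGNMENT) +
	      (uint64_t)rxq_num * I40E_HMC_OBJ_SIZE_RXQ;
	return end;
}

int main(void)
{
	printf("%llu bytes for 16 Tx + 16 Rx contexts\n",
	       (unsigned long long)lan_hmc_bytes(16, 16));
	return 0;
}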
- */ - -/* adminq functions */ -i40e_status i40evf_init_adminq(struct i40e_hw *hw); -i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw); -void i40e_adminq_init_ring_data(struct i40e_hw *hw); -i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, - struct i40e_arq_event_info *e, - u16 *events_pending); -i40e_status i40evf_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details); -bool i40evf_asq_done(struct i40e_hw *hw); - -/* debug function for adminq */ -void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, - void *desc, void *buffer, u16 buf_len); - -void i40e_idle_aq(struct i40e_hw *hw); -void i40evf_resume_aq(struct i40e_hw *hw); -bool i40evf_check_asq_alive(struct i40e_hw *hw); -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); -const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); - -i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); -i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); - -i40e_status i40e_set_mac_type(struct i40e_hw *hw); - -extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; - -static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) -{ - return i40evf_ptype_lookup[ptype]; -} - -/* prototype for functions used for SW locks */ - -/* i40e_common for VF drivers*/ -void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct virtchnl_vf_resource *msg); -i40e_status i40e_vf_reset(struct i40e_hw *hw); -i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_set_filter_control(struct i40e_hw *hw, - struct i40e_filter_control_settings *settings); -i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, - u8 *mac_addr, u16 ethtype, u16 flags, - u16 vsi_seid, u16 queue, bool is_add, - struct i40e_control_filter_stats *stats, - struct i40e_asq_cmd_details *cmd_details); -void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, - u16 vsi_seid); -i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); -u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); -i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); -void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); -i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); - -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, - u16 reg, u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, - u16 reg, u8 phy_addr, u16 value); -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 
page, u16 reg, - u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 value); -u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); -i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details * - cmd_details); -i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details * - cmd_details); -struct i40e_generic_seg_header * -i40evf_find_segment_in_package(u32 segment_type, - struct i40e_package_header *pkg_header); -enum i40e_status_code -i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, - u32 track_id); -enum i40e_status_code -i40evf_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id); -#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h deleted file mode 100644 index 49e1f57d99cc..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ /dev/null @@ -1,313 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_REGISTER_H_ -#define _I40E_REGISTER_H_ - -#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ -#define I40E_VFMSIX_PBA1_MAX_INDEX 19 -#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 -#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) -#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ -#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) -#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ -#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) -#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ -#define I40E_VF_ARQH1_ARQH_SHIFT 0 -#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) -#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ -#define 
I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) -#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ -#define I40E_VF_ARQT1_ARQT_SHIFT 0 -#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) -#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ -#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) -#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ -#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) -#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ -#define I40E_VF_ATQH1_ATQH_SHIFT 0 -#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) -#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ -#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) -#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ -#define I40E_VF_ATQT1_ATQT_SHIFT 0 -#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) -#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ -#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) -#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK 
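The ARQLEN1/ATQLEN1 layout above packs the admin-queue ring length into bits 9:0 and the enable flag into bit 31. A minimal sketch of the value a driver would compose before writing the register; the helper name is made up, only the shifts and masks come from this header:

#include <stdint.h>
#include <stdio.h>

#define I40E_MASK(mask, shift) ((uint32_t)(mask) << (shift))

#define I40E_VF_ARQLEN1_ARQLEN_SHIFT    0
#define I40E_VF_ARQLEN1_ARQLEN_MASK     I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
#define I40E_VF_ARQLEN1_ARQENABLE_MASK  I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)

/* Value a driver could write to ARQLEN1 to size and enable the admin
 * receive queue: ring length in bits 9:0, enable in bit 31.
 */
static uint32_t arqlen_value(uint16_t num_descs)
{
	return (num_descs & I40E_VF_ARQLEN1_ARQLEN_MASK) |
	       I40E_VF_ARQLEN1_ARQENABLE_MASK;
}

int main(void)
{
	printf("ARQLEN1 = 0x%08x\n", arqlen_value(32));
	return 0;
}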
I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) -#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 -#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) -#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ -#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) -#define I40E_VFINT_ICR01_SWINT_SHIFT 31 -#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) -#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ -#define I40E_VFINT_ITR01_MAX_INDEX 2 -#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN1_MAX_INDEX 2 -#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define 
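The per-vector register macros above are simple stride arithmetic; for example, computing the dynamic-control and ITR0 registers for one MSI-X vector. The offsets are copied from this header, the chosen vector index is arbitrary:

#include <stdio.h>

/* Offsets copied from the deleted i40e_register.h */
#define I40E_VFINT_DYN_CTLN1(vec)  (0x00003800 + (vec) * 4)              /* vec = 0..15 */
#define I40E_VFINT_ITRN1(itr, vec) (0x00002800 + (itr) * 64 + (vec) * 4) /* itr = 0..2  */

int main(void)
{
	/* e.g. the registers for MSI-X vector 3 */
	printf("DYN_CTLN1(3) = 0x%05x\n", (unsigned)I40E_VFINT_DYN_CTLN1(3));
	printf("ITRN1(0, 3)  = 0x%05x\n", (unsigned)I40E_VFINT_ITRN1(0, 3));
	return 0;
}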
I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) -#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_QRX_TAIL1_MAX_INDEX 15 -#define I40E_QRX_TAIL1_TAIL_SHIFT 0 -#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) -#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ -#define I40E_QTX_TAIL1_MAX_INDEX 15 -#define I40E_QTX_TAIL1_TAIL_SHIFT 0 -#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) -#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ -#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD_MAX_INDEX 16 -#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG_MAX_INDEX 16 -#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD_MAX_INDEX 16 -#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 -#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) -#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_VFQF_HENA_MAX_INDEX 1 -#define 
I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 -#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) -#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ -#define I40E_VFQF_HKEY_MAX_INDEX 12 -#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) -#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) -#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) -#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) -#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_VFQF_HLUT_MAX_INDEX 15 -#define I40E_VFQF_HLUT_LUT0_SHIFT 0 -#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) -#define I40E_VFQF_HLUT_LUT1_SHIFT 8 -#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) -#define I40E_VFQF_HLUT_LUT2_SHIFT 16 -#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) -#define I40E_VFQF_HLUT_LUT3_SHIFT 24 -#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) -#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_VFQF_HREGION_MAX_INDEX 7 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, 
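The VFQF_HLUT layout above stores the RSS lookup table as four 4-bit queue indices per 32-bit register, sixteen registers in all (a 64-entry LUT). A small sketch of packing one such register; the helper is illustrative, not the driver's own code:

#include <stdint.h>
#include <stdio.h>

/* From the deleted i40e_register.h: 16 HLUT registers, four 4-bit queue
 * indices per register at shifts 0, 8, 16 and 24 = 64 LUT entries.
 */
#define I40E_VFQF_HLUT_MAX_INDEX 15

static uint32_t pack_hlut_reg(const uint8_t *lut, unsigned int reg)
{
	return ((uint32_t)(lut[reg * 4 + 0] & 0xF))       |
	       ((uint32_t)(lut[reg * 4 + 1] & 0xF) << 8)  |
	       ((uint32_t)(lut[reg * 4 + 2] & 0xF) << 16) |
	       ((uint32_t)(lut[reg * 4 + 3] & 0xF) << 24);
}

int main(void)
{
	uint8_t lut[(I40E_VFQF_HLUT_MAX_INDEX + 1) * 4];
	unsigned int i;

	/* spread the 64 LUT entries over 4 queues round-robin */
	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % 4;

	printf("HLUT[0] = 0x%08x\n", pack_hlut_reg(lut, 0));
	return 0;
}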
I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) -#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ -#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) -#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ -#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) -#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ -#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 -#define 
I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) -#endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h deleted file mode 100644 index 094387db3c11..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ /dev/null @@ -1,1496 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_TYPE_H_ -#define _I40E_TYPE_H_ - -#include "i40e_status.h" -#include "i40e_osdep.h" -#include "i40e_register.h" -#include "i40e_adminq.h" -#include "i40e_hmc.h" -#include "i40e_lan_hmc.h" -#include "i40e_devids.h" - -/* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) - -#define I40E_MAX_VSI_QP 16 -#define I40E_MAX_VF_VSI 3 -#define I40E_MAX_CHAINED_RX_BUFFERS 5 -#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 - -/* Max default timeout in ms, */ -#define I40E_MAX_NVM_TIMEOUT 18000 - -/* Max timeout in ms for the phy to respond */ -#define I40E_MAX_PHY_TIMEOUT 500 - -/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ -#define I40E_MS_TO_GTIME(time) ((time) * 1000) - -/* forward declaration */ -struct i40e_hw; -typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); - -/* Data type manipulation macros. */ - -#define I40E_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) - -/* bitfields for Tx queue mapping in QTX_CTL */ -#define I40E_QTX_CTL_VF_QUEUE 0x0 -#define I40E_QTX_CTL_VM_QUEUE 0x1 -#define I40E_QTX_CTL_PF_QUEUE 0x2 - -/* debug masks - set these bits in hw->debug_mask to control output */ -enum i40e_debug_mask { - I40E_DEBUG_INIT = 0x00000001, - I40E_DEBUG_RELEASE = 0x00000002, - - I40E_DEBUG_LINK = 0x00000010, - I40E_DEBUG_PHY = 0x00000020, - I40E_DEBUG_HMC = 0x00000040, - I40E_DEBUG_NVM = 0x00000080, - I40E_DEBUG_LAN = 0x00000100, - I40E_DEBUG_FLOW = 0x00000200, - I40E_DEBUG_DCB = 0x00000400, - I40E_DEBUG_DIAG = 0x00000800, - I40E_DEBUG_FD = 0x00001000, - I40E_DEBUG_PACKAGE = 0x00002000, - - I40E_DEBUG_AQ_MESSAGE = 0x01000000, - I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, - I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, - I40E_DEBUG_AQ_COMMAND = 0x06000000, - I40E_DEBUG_AQ = 0x0F000000, - - I40E_DEBUG_USER = 0xF0000000, - - I40E_DEBUG_ALL = 0xFFFFFFFF -}; - -/* These are structs for managing the hardware information and the operations. - * The structures of function pointers are filled out at init time when we - * know for sure exactly which hardware we're working with. 
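I40E_DESC_UNUSED() above is the usual circular-ring bookkeeping: the number of free slots between next_to_use and next_to_clean, always keeping one descriptor back so the producer never catches the consumer. The same arithmetic as a standalone function; the ring struct here is a stand-in, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct ring {
	uint16_t count;          /* descriptors in the ring */
	uint16_t next_to_use;    /* next slot software will fill */
	uint16_t next_to_clean;  /* next slot hardware hands back */
};

/* Same arithmetic as I40E_DESC_UNUSED(): free slots in a circular ring,
 * leaving one descriptor unused.
 */
static uint16_t desc_unused(const struct ring *r)
{
	return ((r->next_to_clean > r->next_to_use ? 0 : r->count) +
		r->next_to_clean - r->next_to_use - 1);
}

int main(void)
{
	struct ring r = { .count = 512, .next_to_use = 10, .next_to_clean = 4 };

	printf("%u descriptors free\n", desc_unused(&r));   /* prints 505 */
	return 0;
}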
This gives us the - * flexibility of using the same main driver code but adapting to slightly - * different hardware needs as new parts are developed. For this architecture, - * the Firmware and AdminQ are intended to insulate the driver from most of the - * future changes, but these structures will also do part of the job. - */ -enum i40e_mac_type { - I40E_MAC_UNKNOWN = 0, - I40E_MAC_XL710, - I40E_MAC_VF, - I40E_MAC_X722, - I40E_MAC_X722_VF, - I40E_MAC_GENERIC, -}; - -enum i40e_media_type { - I40E_MEDIA_TYPE_UNKNOWN = 0, - I40E_MEDIA_TYPE_FIBER, - I40E_MEDIA_TYPE_BASET, - I40E_MEDIA_TYPE_BACKPLANE, - I40E_MEDIA_TYPE_CX4, - I40E_MEDIA_TYPE_DA, - I40E_MEDIA_TYPE_VIRTUAL -}; - -enum i40e_fc_mode { - I40E_FC_NONE = 0, - I40E_FC_RX_PAUSE, - I40E_FC_TX_PAUSE, - I40E_FC_FULL, - I40E_FC_PFC, - I40E_FC_DEFAULT -}; - -enum i40e_set_fc_aq_failures { - I40E_SET_FC_AQ_FAIL_NONE = 0, - I40E_SET_FC_AQ_FAIL_GET = 1, - I40E_SET_FC_AQ_FAIL_SET = 2, - I40E_SET_FC_AQ_FAIL_UPDATE = 4, - I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 -}; - -enum i40e_vsi_type { - I40E_VSI_MAIN = 0, - I40E_VSI_VMDQ1 = 1, - I40E_VSI_VMDQ2 = 2, - I40E_VSI_CTRL = 3, - I40E_VSI_FCOE = 4, - I40E_VSI_MIRROR = 5, - I40E_VSI_SRIOV = 6, - I40E_VSI_FDIR = 7, - I40E_VSI_TYPE_UNKNOWN -}; - -enum i40e_queue_type { - I40E_QUEUE_TYPE_RX = 0, - I40E_QUEUE_TYPE_TX, - I40E_QUEUE_TYPE_PE_CEQ, - I40E_QUEUE_TYPE_UNKNOWN -}; - -struct i40e_link_status { - enum i40e_aq_phy_type phy_type; - enum i40e_aq_link_speed link_speed; - u8 link_info; - u8 an_info; - u8 req_fec_info; - u8 fec_info; - u8 ext_info; - u8 loopback; - /* is Link Status Event notification to SW enabled */ - bool lse_enable; - u16 max_frame_size; - bool crc_enable; - u8 pacing; - u8 requested_speeds; - u8 module_type[3]; - /* 1st byte: module identifier */ -#define I40E_MODULE_TYPE_SFP 0x03 -#define I40E_MODULE_TYPE_QSFP 0x0D - /* 2nd byte: ethernet compliance codes for 10/40G */ -#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 -#define I40E_MODULE_TYPE_40G_LR4 0x02 -#define I40E_MODULE_TYPE_40G_SR4 0x04 -#define I40E_MODULE_TYPE_40G_CR4 0x08 -#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 -#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 -#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 -#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 - /* 3rd byte: ethernet compliance codes for 1G */ -#define I40E_MODULE_TYPE_1000BASE_SX 0x01 -#define I40E_MODULE_TYPE_1000BASE_LX 0x02 -#define I40E_MODULE_TYPE_1000BASE_CX 0x04 -#define I40E_MODULE_TYPE_1000BASE_T 0x08 -}; - -struct i40e_phy_info { - struct i40e_link_status link_info; - struct i40e_link_status link_info_old; - bool get_link_info; - enum i40e_media_type media_type; - /* all the phy types the NVM is capable of */ - u64 phy_types; -}; - -#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII) -#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) -#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) -#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) -#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) -#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI) -#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI) -#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI) -#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI) -#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_AOC 
BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) -#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) -#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX) -#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) -#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) -#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) -#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) -#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) -#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) -#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) -#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ - BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) -#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) -/* Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some - * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit - * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So, - * a shift is needed to adjust for this with values larger than 31. The - * only affected values are I40E_PHY_TYPE_25GBASE_*. - */ -#define I40E_PHY_TYPE_OFFSET 1 -#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_HW_CAP_MAX_GPIO 30 -/* Capabilities of a PF or a VF or the whole device */ -struct i40e_hw_capabilities { - u32 switch_mode; -#define I40E_NVM_IMAGE_TYPE_EVB 0x0 -#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 -#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 - - u32 management_mode; - u32 mng_protocols_over_mctp; -#define I40E_MNG_PROTOCOL_PLDM 0x2 -#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 -#define I40E_MNG_PROTOCOL_NCSI 0x8 - u32 npar_enable; - u32 os2bmc; - u32 valid_functions; - bool sr_iov_1_1; - bool vmdq; - bool evb_802_1_qbg; /* Edge Virtual Bridging */ - bool evb_802_1_qbh; /* Bridge Port Extension */ - bool dcb; - bool fcoe; - bool iscsi; /* Indicates iSCSI enabled */ - bool flex10_enable; - bool flex10_capable; - u32 flex10_mode; -#define I40E_FLEX10_MODE_UNKNOWN 0x0 -#define I40E_FLEX10_MODE_DCC 0x1 -#define I40E_FLEX10_MODE_DCI 0x2 - - u32 flex10_status; -#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 -#define I40E_FLEX10_STATUS_VC_MODE 0x2 - - bool sec_rev_disabled; - bool update_disabled; -#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 -#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 - - bool mgmt_cem; - bool ieee_1588; - bool iwarp; - bool fd; - u32 fd_filters_guaranteed; - u32 fd_filters_best_effort; - bool rss; - u32 rss_table_size; - u32 rss_table_entry_width; - bool led[I40E_HW_CAP_MAX_GPIO]; - bool sdp[I40E_HW_CAP_MAX_GPIO]; - u32 nvm_image_type; - u32 num_flow_director_filters; - u32 num_vfs; - u32 vf_base_id; - u32 num_vsis; - u32 num_rx_qp; - u32 num_tx_qp; - u32 base_queue; - u32 num_msix_vectors; - u32 num_msix_vectors_vf; - u32 led_pin_num; - u32 sdp_pin_num; - u32 
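The I40E_PHY_TYPE_OFFSET comment above is easier to see with a number: because bit 31 of the capability word is unused, the 25G phy types land one bit above their enum value. A tiny sketch; the 0x1F value for I40E_PHY_TYPE_25GBASE_KR is an assumption here (the enum lives in i40e_adminq_cmd.h, outside this hunk):

#include <stdint.h>
#include <stdio.h>

#define I40E_PHY_TYPE_OFFSET     1      /* from the deleted i40e_type.h */
#define I40E_PHY_TYPE_25GBASE_KR 0x1F   /* assumed value, see i40e_adminq_cmd.h */

int main(void)
{
	/* bit 31 of the capability word is skipped, so the 25G phy types
	 * get a capability bit one above their enum value
	 */
	int bit = I40E_PHY_TYPE_25GBASE_KR + I40E_PHY_TYPE_OFFSET;
	uint64_t cap = 1ULL << bit;

	printf("25GBASE_KR enum value %d -> capability bit %d (0x%016llx)\n",
	       I40E_PHY_TYPE_25GBASE_KR, bit, (unsigned long long)cap);
	return 0;
}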
mdio_port_num; - u32 mdio_port_mode; - u8 rx_buf_chain_len; - u32 enabled_tcmap; - u32 maxtc; - u64 wr_csr_prot; -}; - -struct i40e_mac_info { - enum i40e_mac_type type; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; - u16 max_fcoeq; -}; - -enum i40e_aq_resources_ids { - I40E_NVM_RESOURCE_ID = 1 -}; - -enum i40e_aq_resource_access_type { - I40E_RESOURCE_READ = 1, - I40E_RESOURCE_WRITE -}; - -struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ - u32 timeout; /* [ms] */ - u16 sr_size; /* Shadow RAM size in words */ - bool blank_nvm_mode; /* is NVM empty (no FW present)*/ - u16 version; /* NVM package version */ - u32 eetrack; /* NVM data version */ - u32 oem_ver; /* OEM version info */ -}; - -/* definitions used in NVM update support */ - -enum i40e_nvmupd_cmd { - I40E_NVMUPD_INVALID, - I40E_NVMUPD_READ_CON, - I40E_NVMUPD_READ_SNT, - I40E_NVMUPD_READ_LCB, - I40E_NVMUPD_READ_SA, - I40E_NVMUPD_WRITE_ERA, - I40E_NVMUPD_WRITE_CON, - I40E_NVMUPD_WRITE_SNT, - I40E_NVMUPD_WRITE_LCB, - I40E_NVMUPD_WRITE_SA, - I40E_NVMUPD_CSUM_CON, - I40E_NVMUPD_CSUM_SA, - I40E_NVMUPD_CSUM_LCB, - I40E_NVMUPD_STATUS, - I40E_NVMUPD_EXEC_AQ, - I40E_NVMUPD_GET_AQ_RESULT, - I40E_NVMUPD_GET_AQ_EVENT, -}; - -enum i40e_nvmupd_state { - I40E_NVMUPD_STATE_INIT, - I40E_NVMUPD_STATE_READING, - I40E_NVMUPD_STATE_WRITING, - I40E_NVMUPD_STATE_INIT_WAIT, - I40E_NVMUPD_STATE_WRITE_WAIT, - I40E_NVMUPD_STATE_ERROR -}; - -/* nvm_access definition and its masks/shifts need to be accessible to - * application, core driver, and shared code. Where is the right file? - */ -#define I40E_NVM_READ 0xB -#define I40E_NVM_WRITE 0xC - -#define I40E_NVM_MOD_PNT_MASK 0xFF - -#define I40E_NVM_TRANS_SHIFT 8 -#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 -#define I40E_NVM_PRESERVATION_FLAGS_MASK \ - (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 -#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 -#define I40E_NVM_CON 0x0 -#define I40E_NVM_SNT 0x1 -#define I40E_NVM_LCB 0x2 -#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) -#define I40E_NVM_ERA 0x4 -#define I40E_NVM_CSUM 0x8 -#define I40E_NVM_AQE 0xe -#define I40E_NVM_EXEC 0xf - -#define I40E_NVM_ADAPT_SHIFT 16 -#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) - -#define I40E_NVMUPD_MAX_DATA 4096 -#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ - -struct i40e_nvm_access { - u32 command; - u32 config; - u32 offset; /* in bytes */ - u32 data_size; /* in bytes */ - u8 data[1]; -}; - -/* (Q)SFP module access definitions */ -#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 -#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 -#define I40E_MODULE_TYPE_ADDR 0x00 -#define I40E_MODULE_REVISION_ADDR 0x01 -#define I40E_MODULE_SFF_8472_COMP 0x5E -#define I40E_MODULE_SFF_8472_SWAP 0x5C -#define I40E_MODULE_SFF_ADDR_MODE 0x04 -#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D -#define I40E_MODULE_TYPE_QSFP28 0x11 -#define I40E_MODULE_QSFP_MAX_LEN 640 - -/* PCI bus types */ -enum i40e_bus_type { - i40e_bus_type_unknown = 0, - i40e_bus_type_pci, - i40e_bus_type_pcix, - i40e_bus_type_pci_express, - i40e_bus_type_reserved -}; - -/* PCI bus speeds */ -enum i40e_bus_speed { - i40e_bus_speed_unknown = 0, - i40e_bus_speed_33 = 33, - i40e_bus_speed_66 = 66, - i40e_bus_speed_100 = 100, - i40e_bus_speed_120 = 120, - i40e_bus_speed_133 = 133, - i40e_bus_speed_2500 = 2500, - i40e_bus_speed_5000 = 5000, - i40e_bus_speed_8000 = 8000, - i40e_bus_speed_reserved -}; - -/* PCI bus widths */ 
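The nvm_access config word above carries the module pointer in bits 7:0, the transaction type in bits 11:8 and the preservation flags in bits 13:12. A sketch of composing that word from the masks and shifts defined here; the helper itself is illustrative, not a driver API:

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the deleted i40e_type.h */
#define I40E_NVM_MOD_PNT_MASK             0xFF
#define I40E_NVM_TRANS_SHIFT              8
#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12

#define I40E_NVM_SNT 0x1
#define I40E_NVM_LCB 0x2
#define I40E_NVM_SA  (I40E_NVM_SNT | I40E_NVM_LCB)
#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02

static uint32_t nvm_access_config(uint8_t module_pointer, uint8_t trans,
				  uint8_t preservation)
{
	return (module_pointer & I40E_NVM_MOD_PNT_MASK) |
	       ((uint32_t)trans << I40E_NVM_TRANS_SHIFT) |
	       ((uint32_t)preservation << I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}

int main(void)
{
	/* e.g. a single-shot (start-and-last-command-block) transaction
	 * asking firmware to preserve all settings
	 */
	printf("config = 0x%04x\n",
	       nvm_access_config(0, I40E_NVM_SA, I40E_NVM_PRESERVATION_FLAGS_ALL));
	return 0;
}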
-enum i40e_bus_width { - i40e_bus_width_unknown = 0, - i40e_bus_width_pcie_x1 = 1, - i40e_bus_width_pcie_x2 = 2, - i40e_bus_width_pcie_x4 = 4, - i40e_bus_width_pcie_x8 = 8, - i40e_bus_width_32 = 32, - i40e_bus_width_64 = 64, - i40e_bus_width_reserved -}; - -/* Bus parameters */ -struct i40e_bus_info { - enum i40e_bus_speed speed; - enum i40e_bus_width width; - enum i40e_bus_type type; - - u16 func; - u16 device; - u16 lan_id; - u16 bus_id; -}; - -/* Flow control (FC) parameters */ -struct i40e_fc_info { - enum i40e_fc_mode current_mode; /* FC mode in effect */ - enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ -}; - -#define I40E_MAX_TRAFFIC_CLASS 8 -#define I40E_MAX_USER_PRIORITY 8 -#define I40E_DCBX_MAX_APPS 32 -#define I40E_LLDPDU_SIZE 1500 - -/* IEEE 802.1Qaz ETS Configuration data */ -struct i40e_ieee_ets_config { - u8 willing; - u8 cbs; - u8 maxtcs; - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz ETS Recommendation data */ -struct i40e_ieee_ets_recommend { - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz PFC Configuration data */ -struct i40e_ieee_pfc_config { - u8 willing; - u8 mbc; - u8 pfccap; - u8 pfcenable; -}; - -/* IEEE 802.1Qaz Application Priority data */ -struct i40e_ieee_app_priority_table { - u8 priority; - u8 selector; - u16 protocolid; -}; - -struct i40e_dcbx_config { - u32 numapps; - u32 tlv_status; /* CEE mode TLV status */ - struct i40e_ieee_ets_config etscfg; - struct i40e_ieee_ets_recommend etsrec; - struct i40e_ieee_pfc_config pfc; - struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; -}; - -/* Port hardware description */ -struct i40e_hw { - u8 __iomem *hw_addr; - void *back; - - /* subsystem structs */ - struct i40e_phy_info phy; - struct i40e_mac_info mac; - struct i40e_bus_info bus; - struct i40e_nvm_info nvm; - struct i40e_fc_info fc; - - /* pci info */ - u16 device_id; - u16 vendor_id; - u16 subsystem_device_id; - u16 subsystem_vendor_id; - u8 revision_id; - u8 port; - bool adapter_stopped; - - /* capabilities for entire device and PCI func */ - struct i40e_hw_capabilities dev_caps; - struct i40e_hw_capabilities func_caps; - - /* Flow Director shared filter space */ - u16 fdir_shared_filter_count; - - /* device profile info */ - u8 pf_id; - u16 main_vsi_seid; - - /* for multi-function MACs */ - u16 partition_id; - u16 num_partitions; - u16 num_ports; - - /* Closest numa node to the device */ - u16 numa_node; - - /* Admin Queue info */ - struct i40e_adminq_info aq; - - /* state of nvm update process */ - enum i40e_nvmupd_state nvmupd_state; - struct i40e_aq_desc nvm_wb_desc; - struct i40e_aq_desc nvm_aq_event_desc; - struct i40e_virt_mem nvm_buff; - bool nvm_release_on_done; - u16 nvm_wait_opcode; - - /* HMC info */ - struct i40e_hmc_info hmc; /* HMC info struct */ - - /* LLDP/DCBX Status */ - u16 dcbx_status; - -#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) -#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) - - /* DCBX info */ - struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ - struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ - struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ - - /* Used in set switch config AQ command */ - u16 switch_tag; - u16 first_tag; - u16 second_tag; - - /* debug mask */ - u32 debug_mask; - char err_str[16]; -}; - -static inline bool i40e_is_vf(struct i40e_hw *hw) 
-{ - return (hw->mac.type == I40E_MAC_VF || - hw->mac.type == I40E_MAC_X722_VF); -} - -struct i40e_driver_version { - u8 major_version; - u8 minor_version; - u8 build_version; - u8 subbuild_version; - u8 driver_string[32]; -}; - -/* RX Descriptors */ -union i40e_16byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fd_id; /* Flow director filter id */ - __le32 fcoe_param; /* FCoE DDP Context id */ - } hi_dword; - } qword0; - struct { - /* ext status/error/pktype/length */ - __le64 status_error_len; - } qword1; - } wb; /* writeback */ -}; - -union i40e_32byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - /* bit 0 of hdr_buffer_addr is DD bit */ - __le64 rsvd1; - __le64 rsvd2; - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fcoe_param; /* FCoE DDP Context id */ - /* Flow director filter id in case of - * Programming status desc WB - */ - __le32 fd_id; - } hi_dword; - } qword0; - struct { - /* status/error/pktype/length */ - __le64 status_error_len; - } qword1; - struct { - __le16 ext_status; /* extended status */ - __le16 rsvd; - __le16 l2tag2_1; - __le16 l2tag2_2; - } qword2; - struct { - union { - __le32 flex_bytes_lo; - __le32 pe_status; - } lo_dword; - union { - __le32 flex_bytes_hi; - __le32 fd_id; - } hi_dword; - } qword3; - } wb; /* writeback */ -}; - -enum i40e_rx_desc_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_STATUS_DD_SHIFT = 0, - I40E_RX_DESC_STATUS_EOF_SHIFT = 1, - I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, - I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, - I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, - I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ - I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - /* Note: Bit 8 is reserved in X710 and XL710 */ - I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, - I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ - I40E_RX_DESC_STATUS_FLM_SHIFT = 11, - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ - I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, - I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, - I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - /* Note: For non-tunnel packets INT_UDP_0 is the right status for - * UDP header - */ - I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, - I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ -}; - -#define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ - << I40E_RXD_QW1_STATUS_SHIFT) - -#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ - I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) - -#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ - BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) - -enum i40e_rx_desc_fltstat_values { - I40E_RX_DESC_FLTSTAT_NO_DATA = 0, - I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ - I40E_RX_DESC_FLTSTAT_RSV = 2, - I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, -}; - -#define I40E_RXD_QW1_ERROR_SHIFT 19 -#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) - -enum i40e_rx_desc_error_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_ERROR_RXE_SHIFT = 0, - I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, - I40E_RX_DESC_ERROR_HBO_SHIFT = 2, - I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ - I40E_RX_DESC_ERROR_IPE_SHIFT = 3, - I40E_RX_DESC_ERROR_L4E_SHIFT = 4, - I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, - I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, - I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 -}; - -enum i40e_rx_desc_error_l3l4e_fcoe_masks { - I40E_RX_DESC_ERROR_L3L4E_NONE = 0, - I40E_RX_DESC_ERROR_L3L4E_PROT = 1, - I40E_RX_DESC_ERROR_L3L4E_FC = 2, - I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, - I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 -}; - -#define I40E_RXD_QW1_PTYPE_SHIFT 30 -#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) - -/* Packet type non-ip values */ -enum i40e_rx_l2_ptype { - I40E_RX_PTYPE_L2_RESERVED = 0, - I40E_RX_PTYPE_L2_MAC_PAY2 = 1, - I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - I40E_RX_PTYPE_L2_FIP_PAY2 = 3, - I40E_RX_PTYPE_L2_OUI_PAY2 = 4, - I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, - I40E_RX_PTYPE_L2_ECP_PAY2 = 7, - I40E_RX_PTYPE_L2_EVB_PAY2 = 8, - I40E_RX_PTYPE_L2_QCN_PAY2 = 9, - I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, - I40E_RX_PTYPE_L2_ARP = 11, - I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, - I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, - I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, - I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, - I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, - I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 -}; - -struct i40e_rx_ptype_decoded { - u32 ptype:8; - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:1; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum i40e_rx_ptype_outer_ip { - I40E_RX_PTYPE_OUTER_L2 = 0, - I40E_RX_PTYPE_OUTER_IP = 1 -}; - -enum i40e_rx_ptype_outer_ip_ver { - I40E_RX_PTYPE_OUTER_NONE = 0, - I40E_RX_PTYPE_OUTER_IPV4 = 0, - I40E_RX_PTYPE_OUTER_IPV6 = 1 -}; - -enum i40e_rx_ptype_outer_fragmented { - I40E_RX_PTYPE_NOT_FRAG = 0, - I40E_RX_PTYPE_FRAG = 1 -}; - -enum i40e_rx_ptype_tunnel_type { - I40E_RX_PTYPE_TUNNEL_NONE = 0, - I40E_RX_PTYPE_TUNNEL_IP_IP = 1, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum i40e_rx_ptype_tunnel_end_prot { - I40E_RX_PTYPE_TUNNEL_END_NONE = 0, - I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, - I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum i40e_rx_ptype_inner_prot { - I40E_RX_PTYPE_INNER_PROT_NONE = 0, - I40E_RX_PTYPE_INNER_PROT_UDP = 1, - I40E_RX_PTYPE_INNER_PROT_TCP = 2, - I40E_RX_PTYPE_INNER_PROT_SCTP = 3, - I40E_RX_PTYPE_INNER_PROT_ICMP = 4, - I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 -}; - -enum i40e_rx_ptype_payload_layer { - I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - -#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 -#define I40E_RXD_QW1_LENGTH_PBUF_MASK 
(0x3FFFULL << \ - I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - -#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 -#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ - I40E_RXD_QW1_LENGTH_HBUF_SHIFT) - -#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) - -enum i40e_rx_desc_ext_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, - I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, - I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ - I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ - I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, - I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, - I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, -}; - -enum i40e_rx_desc_pe_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ - I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ - I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ - I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, - I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, - I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, - I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, - I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, - I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 -}; - -#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 -#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 - -#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 -#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ - I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) - -#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 -#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ - I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) - -enum i40e_rx_prog_status_desc_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ -}; - -enum i40e_rx_prog_status_desc_prog_id_masks { - I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, - I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, - I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, -}; - -enum i40e_rx_prog_status_desc_error_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, - I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, - I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 -}; - -/* TX Descriptor */ -struct i40e_tx_desc { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le64 cmd_type_offset_bsz; -}; - -#define I40E_TXD_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) - -enum i40e_tx_desc_dtype_value { - I40E_TX_DESC_DTYPE_DATA = 0x0, - I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ - I40E_TX_DESC_DTYPE_CONTEXT = 0x1, - I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, - I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, - I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, - I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, - I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, - I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, - I40E_TX_DESC_DTYPE_DESC_DONE = 0xF -}; - -#define I40E_TXD_QW1_CMD_SHIFT 4 -#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) - -enum i40e_tx_desc_cmd_bits { - I40E_TX_DESC_CMD_EOP = 0x0001, - I40E_TX_DESC_CMD_RS = 0x0002, - I40E_TX_DESC_CMD_ICRC = 0x0004, - I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, - I40E_TX_DESC_CMD_DUMMY = 0x0010, - I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ - I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ - 
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ - I40E_TX_DESC_CMD_FCOET = 0x0080, - I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ -}; - -#define I40E_TXD_QW1_OFFSET_SHIFT 16 -#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ - I40E_TXD_QW1_OFFSET_SHIFT) - -enum i40e_tx_desc_length_fields { - /* Note: These are predefined bit offsets */ - I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ - I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ -}; - -#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 -#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ - I40E_TXD_QW1_TX_BUF_SZ_SHIFT) - -#define I40E_TXD_QW1_L2TAG1_SHIFT 48 -#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) - -/* Context descriptors */ -struct i40e_tx_context_desc { - __le32 tunneling_params; - __le16 l2tag2; - __le16 rsvd; - __le64 type_cmd_tso_mss; -}; - -#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) - -#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 -#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) - -enum i40e_tx_ctx_desc_cmd_bits { - I40E_TX_CTX_DESC_TSO = 0x01, - I40E_TX_CTX_DESC_TSYN = 0x02, - I40E_TX_CTX_DESC_IL2TAG2 = 0x04, - I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, - I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, - I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, - I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, - I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, - I40E_TX_CTX_DESC_SWPE = 0x40 -}; - -#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 -#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) - -#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 -#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ - I40E_TXD_CTX_QW1_MSS_SHIFT) - -#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 -#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) - -#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 -#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ - I40E_TXD_CTX_QW0_EXT_IP_SHIFT) - -enum i40e_tx_ctx_desc_eipt_offload { - I40E_TX_CTX_EXT_IP_NONE = 0x0, - I40E_TX_CTX_EXT_IP_IPV6 = 0x1, - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, - I40E_TX_CTX_EXT_IP_IPV4 = 0x3 -}; - -#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 -#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) - -#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 -#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) - -#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) - -#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ - BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) - -#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK - -#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 -#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ - I40E_TXD_CTX_QW0_NATLEN_SHIFT) - -#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 -#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ - I40E_TXD_CTX_QW0_DECTTL_SHIFT) - -#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 -#define I40E_TXD_CTX_QW0_L4T_CS_MASK 
BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) -struct i40e_filter_program_desc { - __le32 qindex_flex_ptype_vsi; - __le32 rsvd; - __le32 dtype_cmd_cntindex; - __le32 fd_id; -}; -#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 -#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ - I40E_TXD_FLTR_QW0_QINDEX_SHIFT) -#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 -#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ - I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) -#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 -#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ - I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) - -/* Packet Classifier Types for filters */ -enum i40e_filter_pctype { - /* Note: Values 0-28 are reserved for future use. - * Value 29, 30, 32 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, - I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, - I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, - I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, - I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-38 are reserved for future use. - * Value 39, 40, 42 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, - I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, - I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, - I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, - I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, - /* Note: Value 47 is reserved for future use */ - I40E_FILTER_PCTYPE_FCOE_OX = 48, - I40E_FILTER_PCTYPE_FCOE_RX = 49, - I40E_FILTER_PCTYPE_FCOE_OTHER = 50, - /* Note: Values 51-62 are reserved for future use */ - I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, -}; - -enum i40e_filter_program_desc_dest { - I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, -}; - -enum i40e_filter_program_desc_fd_status { - I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, -}; - -#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) - -#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 -#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) - -#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) - -enum i40e_filter_program_desc_pcmd { - I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, - I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, -}; - -#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) - -#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ - I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 -#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) - -enum i40e_filter_type { - 
I40E_FLOW_DIRECTOR_FLTR = 0, - I40E_PE_QUAD_HASH_FLTR = 1, - I40E_ETHERTYPE_FLTR, - I40E_FCOE_CTX_FLTR, - I40E_MAC_VLAN_FLTR, - I40E_HASH_FLTR -}; - -struct i40e_vsi_context { - u16 seid; - u16 uplink_seid; - u16 vsi_number; - u16 vsis_allocated; - u16 vsis_unallocated; - u16 flags; - u8 pf_num; - u8 vf_num; - u8 connection_type; - struct i40e_aqc_vsi_properties_data info; -}; - -struct i40e_veb_context { - u16 seid; - u16 uplink_seid; - u16 veb_number; - u16 vebs_allocated; - u16 vebs_unallocated; - u16 flags; - struct i40e_aqc_get_veb_parameters_completion info; -}; - -/* Statistics collected by each port, VSI, VEB, and S-channel */ -struct i40e_eth_stats { - u64 rx_bytes; /* gorc */ - u64 rx_unicast; /* uprc */ - u64 rx_multicast; /* mprc */ - u64 rx_broadcast; /* bprc */ - u64 rx_discards; /* rdpc */ - u64 rx_unknown_protocol; /* rupp */ - u64 tx_bytes; /* gotc */ - u64 tx_unicast; /* uptc */ - u64 tx_multicast; /* mptc */ - u64 tx_broadcast; /* bptc */ - u64 tx_discards; /* tdpc */ - u64 tx_errors; /* tepc */ -}; - -/* Statistics collected per VEB per TC */ -struct i40e_veb_tc_stats { - u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* Statistics collected by the MAC */ -struct i40e_hw_port_stats { - /* eth stats collected by the port */ - struct i40e_eth_stats eth; - - /* additional port specific stats */ - u64 tx_dropped_link_down; /* tdold */ - u64 crc_errors; /* crcerrs */ - u64 illegal_bytes; /* illerrc */ - u64 error_bytes; /* errbc */ - u64 mac_local_faults; /* mlfc */ - u64 mac_remote_faults; /* mrfc */ - u64 rx_length_errors; /* rlec */ - u64 link_xon_rx; /* lxonrxc */ - u64 link_xoff_rx; /* lxoffrxc */ - u64 priority_xon_rx[8]; /* pxonrxc[8] */ - u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ - u64 link_xon_tx; /* lxontxc */ - u64 link_xoff_tx; /* lxofftxc */ - u64 priority_xon_tx[8]; /* pxontxc[8] */ - u64 priority_xoff_tx[8]; /* pxofftxc[8] */ - u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ - u64 rx_size_64; /* prc64 */ - u64 rx_size_127; /* prc127 */ - u64 rx_size_255; /* prc255 */ - u64 rx_size_511; /* prc511 */ - u64 rx_size_1023; /* prc1023 */ - u64 rx_size_1522; /* prc1522 */ - u64 rx_size_big; /* prc9522 */ - u64 rx_undersize; /* ruc */ - u64 rx_fragments; /* rfc */ - u64 rx_oversize; /* roc */ - u64 rx_jabber; /* rjc */ - u64 tx_size_64; /* ptc64 */ - u64 tx_size_127; /* ptc127 */ - u64 tx_size_255; /* ptc255 */ - u64 tx_size_511; /* ptc511 */ - u64 tx_size_1023; /* ptc1023 */ - u64 tx_size_1522; /* ptc1522 */ - u64 tx_size_big; /* ptc9522 */ - u64 mac_short_packet_dropped; /* mspdc */ - u64 checksum_error; /* xec */ - /* flow director stats */ - u64 fd_atr_match; - u64 fd_sb_match; - u64 fd_atr_tunnel_match; - u32 fd_atr_status; - u32 fd_sb_status; - /* EEE LPI */ - u32 tx_lpi_status; - u32 rx_lpi_status; - u64 tx_lpi_count; /* etlpic */ - u64 rx_lpi_count; /* erlpic */ -}; - -/* Checksum and Shadow RAM pointers */ -#define I40E_SR_NVM_CONTROL_WORD 0x00 -#define I40E_EMP_MODULE_PTR 0x0F -#define I40E_SR_EMP_MODULE_PTR 0x48 -#define I40E_NVM_OEM_VER_OFF 0x83 -#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 -#define I40E_SR_NVM_WAKE_ON_LAN 0x19 -#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 -#define I40E_SR_NVM_EETRACK_LO 0x2D -#define I40E_SR_NVM_EETRACK_HI 0x2E -#define I40E_SR_VPD_PTR 0x2F -#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E -#define I40E_SR_SW_CHECKSUM_WORD 0x3F - -/* Auxiliary field, mask and shift definition for Shadow 
RAM and NVM Flash */ -#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 -#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 -#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 -#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) -#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) -#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) -#define I40E_PTR_TYPE BIT(15) - -/* Shadow RAM related */ -#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 -#define I40E_SR_WORDS_IN_1KB 512 -/* Checksum should be calculated such that after adding all the words, - * including the checksum word itself, the sum should be 0xBABA. - */ -#define I40E_SR_SW_CHECKSUM_BASE 0xBABA - -#define I40E_SRRD_SRCTL_ATTEMPTS 100000 - -enum i40e_switch_element_types { - I40E_SWITCH_ELEMENT_TYPE_MAC = 1, - I40E_SWITCH_ELEMENT_TYPE_PF = 2, - I40E_SWITCH_ELEMENT_TYPE_VF = 3, - I40E_SWITCH_ELEMENT_TYPE_EMP = 4, - I40E_SWITCH_ELEMENT_TYPE_BMC = 6, - I40E_SWITCH_ELEMENT_TYPE_PE = 16, - I40E_SWITCH_ELEMENT_TYPE_VEB = 17, - I40E_SWITCH_ELEMENT_TYPE_PA = 18, - I40E_SWITCH_ELEMENT_TYPE_VSI = 19, -}; - -/* Supported EtherType filters */ -enum i40e_ether_type_index { - I40E_ETHER_TYPE_1588 = 0, - I40E_ETHER_TYPE_FIP = 1, - I40E_ETHER_TYPE_OUI_EXTENDED = 2, - I40E_ETHER_TYPE_MAC_CONTROL = 3, - I40E_ETHER_TYPE_LLDP = 4, - I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, - I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, - I40E_ETHER_TYPE_QCN_CNM = 7, - I40E_ETHER_TYPE_8021X = 8, - I40E_ETHER_TYPE_ARP = 9, - I40E_ETHER_TYPE_RSV1 = 10, - I40E_ETHER_TYPE_RSV2 = 11, -}; - -/* Filter context base size is 1K */ -#define I40E_HASH_FILTER_BASE_SIZE 1024 -/* Supported Hash filter values */ -enum i40e_hash_filter_size { - I40E_HASH_FILTER_SIZE_1K = 0, - I40E_HASH_FILTER_SIZE_2K = 1, - I40E_HASH_FILTER_SIZE_4K = 2, - I40E_HASH_FILTER_SIZE_8K = 3, - I40E_HASH_FILTER_SIZE_16K = 4, - I40E_HASH_FILTER_SIZE_32K = 5, - I40E_HASH_FILTER_SIZE_64K = 6, - I40E_HASH_FILTER_SIZE_128K = 7, - I40E_HASH_FILTER_SIZE_256K = 8, - I40E_HASH_FILTER_SIZE_512K = 9, - I40E_HASH_FILTER_SIZE_1M = 10, -}; - -/* DMA context base size is 0.5K */ -#define I40E_DMA_CNTX_BASE_SIZE 512 -/* Supported DMA context values */ -enum i40e_dma_cntx_size { - I40E_DMA_CNTX_SIZE_512 = 0, - I40E_DMA_CNTX_SIZE_1K = 1, - I40E_DMA_CNTX_SIZE_2K = 2, - I40E_DMA_CNTX_SIZE_4K = 3, - I40E_DMA_CNTX_SIZE_8K = 4, - I40E_DMA_CNTX_SIZE_16K = 5, - I40E_DMA_CNTX_SIZE_32K = 6, - I40E_DMA_CNTX_SIZE_64K = 7, - I40E_DMA_CNTX_SIZE_128K = 8, - I40E_DMA_CNTX_SIZE_256K = 9, -}; - -/* Supported Hash look up table (LUT) sizes */ -enum i40e_hash_lut_size { - I40E_HASH_LUT_SIZE_128 = 0, - I40E_HASH_LUT_SIZE_512 = 1, -}; - -/* Structure to hold a per PF filter control settings */ -struct i40e_filter_control_settings { - /* number of PE Quad Hash filter buckets */ - enum i40e_hash_filter_size pe_filt_num; - /* number of PE Quad Hash contexts */ - enum i40e_dma_cntx_size pe_cntx_num; - /* number of FCoE filter buckets */ - enum i40e_hash_filter_size fcoe_filt_num; - /* number of FCoE DDP contexts */ - enum i40e_dma_cntx_size fcoe_cntx_num; - /* size of the Hash LUT */ - enum i40e_hash_lut_size hash_lut_size; - /* enable FDIR filters for PF and its VFs */ - bool enable_fdir; - /* enable Ethertype filters for PF and its VFs */ - bool enable_ethtype; - /* enable MAC/VLAN filters for PF and its VFs */ - bool enable_macvlan; -}; - -/* Structure to hold device level control filter counts */ -struct i40e_control_filter_stats { - u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ - u16 etype_used; /* Used perfect EtherType filters */ - u16 
mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ - u16 etype_free; /* Un-used perfect EtherType filters */ -}; - -enum i40e_reset_type { - I40E_RESET_POR = 0, - I40E_RESET_CORER = 1, - I40E_RESET_GLOBR = 2, - I40E_RESET_EMPR = 3, -}; - -/* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0x06 -#define I40E_SR_LLDP_CFG_PTR 0x31 - -/* RSS Hash Table Size */ -#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 - -/* INPUT SET MASK for RSS, flow director and flexible payload */ -#define I40E_FD_INSET_L3_SRC_SHIFT 47 -#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_SRC_SHIFT) -#define I40E_FD_INSET_L3_DST_SHIFT 35 -#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_DST_SHIFT) -#define I40E_FD_INSET_L4_SRC_SHIFT 34 -#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_SRC_SHIFT) -#define I40E_FD_INSET_L4_DST_SHIFT 33 -#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_DST_SHIFT) -#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31 -#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_VERIFY_TAG_SHIFT) - -#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17 -#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD50_SHIFT) -#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16 -#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD51_SHIFT) -#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15 -#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD52_SHIFT) -#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14 -#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD53_SHIFT) -#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13 -#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD54_SHIFT) -#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12 -#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD55_SHIFT) -#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11 -#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD56_SHIFT) -#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 -#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD57_SHIFT) - -/* Version format for Dynamic Device Personalization(DDP) */ -struct i40e_ddp_version { - u8 major; - u8 minor; - u8 update; - u8 draft; -}; - -#define I40E_DDP_NAME_SIZE 32 - -/* Package header */ -struct i40e_package_header { - struct i40e_ddp_version version; - u32 segment_count; - u32 segment_offset[1]; -}; - -/* Generic segment header */ -struct i40e_generic_seg_header { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_NOTES 0x00000002 -#define SEGMENT_TYPE_I40E 0x00000011 -#define SEGMENT_TYPE_X722 0x00000012 - u32 type; - struct i40e_ddp_version version; - u32 size; - char name[I40E_DDP_NAME_SIZE]; -}; - -struct i40e_metadata_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - u32 track_id; - char name[I40E_DDP_NAME_SIZE]; -}; - -struct i40e_device_id_entry { - u32 vendor_dev_id; - u32 sub_vendor_dev_id; -}; - -struct i40e_profile_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - char name[I40E_DDP_NAME_SIZE]; - u32 device_table_count; - struct i40e_device_id_entry device_table[1]; -}; - -struct i40e_section_table { - u32 section_count; - u32 section_offset[1]; -}; - -struct i40e_profile_section_header { - u16 tbl_size; - u16 data_end; - struct { -#define SECTION_TYPE_INFO 0x00000010 -#define SECTION_TYPE_MMIO 0x00000800 -#define 
SECTION_TYPE_AQ 0x00000801 -#define SECTION_TYPE_NOTE 0x80000000 -#define SECTION_TYPE_NAME 0x80000001 - u32 type; - u32 offset; - u32 size; - } section; -}; - -struct i40e_profile_info { - u32 track_id; - struct i40e_ddp_version version; - u8 op; -#define I40E_DDP_ADD_TRACKID 0x01 -#define I40E_DDP_REMOVE_TRACKID 0x02 - u8 reserved[7]; - u8 name[I40E_DDP_NAME_SIZE]; -}; -#endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h deleted file mode 100644 index 96e537a35000..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ /dev/null @@ -1,427 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40EVF_H_ -#define _I40EVF_H_ - -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/aer.h> -#include <linux/netdevice.h> -#include <linux/vmalloc.h> -#include <linux/interrupt.h> -#include <linux/ethtool.h> -#include <linux/if_vlan.h> -#include <linux/ip.h> -#include <linux/tcp.h> -#include <linux/sctp.h> -#include <linux/ipv6.h> -#include <linux/kernel.h> -#include <linux/bitops.h> -#include <linux/timer.h> -#include <linux/workqueue.h> -#include <linux/wait.h> -#include <linux/delay.h> -#include <linux/gfp.h> -#include <linux/skbuff.h> -#include <linux/dma-mapping.h> -#include <linux/etherdevice.h> -#include <linux/socket.h> -#include <linux/jiffies.h> -#include <net/ip6_checksum.h> -#include <net/pkt_cls.h> -#include <net/udp.h> -#include <net/tc_act/tc_gact.h> -#include <net/tc_act/tc_mirred.h> - -#include "i40e_type.h" -#include <linux/avf/virtchnl.h> -#include "i40e_txrx.h" - -#define DEFAULT_DEBUG_LEVEL_SHIFT 3 -#define PFX "i40evf: " - -/* VSI state flags shared with common code */ -enum i40evf_vsi_state_t { - __I40E_VSI_DOWN, - /* This must be last as it determines the size of the BITMAP */ - __I40E_VSI_STATE_SIZE__, -}; - -/* dummy struct to make common code less painful */ -struct i40e_vsi { - struct i40evf_adapter *back; - struct net_device *netdev; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u16 seid; - u16 id; - DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__); - int base_vector; - u16 work_limit; - u16 qs_handle; - void *priv; /* client driver data reference. */ -}; - -/* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define I40EVF_DEFAULT_TXD 512 -#define I40EVF_DEFAULT_RXD 512 -#define I40EVF_MAX_TXD 4096 -#define I40EVF_MIN_TXD 64 -#define I40EVF_MAX_RXD 4096 -#define I40EVF_MIN_RXD 64 -#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 -#define I40EVF_MAX_AQ_BUF_SIZE 4096 -#define I40EVF_AQ_LEN 32 -#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ - -#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) - -#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) -#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) -#define I40E_TX_CTXTDESC(R, i) \ - (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define I40EVF_MAX_REQ_QUEUES 4 - -#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) -#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4) -#define I40EVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ - -/* MAX_MSIX_Q_VECTORS of these are allocated, - * but we only use one per queue-specific vector. 
- */ -struct i40e_q_vector { - struct i40evf_adapter *adapter; - struct i40e_vsi *vsi; - struct napi_struct napi; - struct i40e_ring_container rx; - struct i40e_ring_container tx; - u32 ring_mask; - u8 itr_countdown; /* when 0 should adjust adaptive ITR */ - u8 num_ringpairs; /* total number of ring pairs in vector */ - u16 v_idx; /* index in the vsi->q_vector array. */ - u16 reg_idx; /* register index of the interrupt */ - char name[IFNAMSIZ + 15]; - bool arm_wb_state; - cpumask_t affinity_mask; - struct irq_affinity_notify affinity_notify; -}; - -/* Helper macros to switch between ints/sec and what the register uses. - * And yes, it's the same math going both ways. The lowest value - * supported by all of the i40e hardware is 8. - */ -#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ - ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) -#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG - -#define I40EVF_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) - -#define I40EVF_RX_DESC_ADV(R, i) \ - (&(((union i40e_adv_rx_desc *)((R).desc))[i])) -#define I40EVF_TX_DESC_ADV(R, i) \ - (&(((union i40e_adv_tx_desc *)((R).desc))[i])) -#define I40EVF_TX_CTXTDESC_ADV(R, i) \ - (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i])) - -#define OTHER_VECTOR 1 -#define NONQ_VECS (OTHER_VECTOR) - -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) - -#define I40EVF_QUEUE_END_OF_LIST 0x7FF -#define I40EVF_FREE_VECTOR 0x7FFF -struct i40evf_mac_filter { - struct list_head list; - u8 macaddr[ETH_ALEN]; - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ -}; - -struct i40evf_vlan_filter { - struct list_head list; - u16 vlan; - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ -}; - -#define I40EVF_MAX_TRAFFIC_CLASS 4 -/* State of traffic class creation */ -enum i40evf_tc_state_t { - __I40EVF_TC_INVALID, /* no traffic class, default state */ - __I40EVF_TC_RUNNING, /* traffic classes have been created */ -}; - -/* channel info */ -struct i40evf_channel_config { - struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS]; - enum i40evf_tc_state_t state; - u8 total_qps; -}; - -/* State of cloud filter */ -enum i40evf_cloud_filter_state_t { - __I40EVF_CF_INVALID, /* cloud filter not added */ - __I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */ - __I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */ - __I40EVF_CF_ACTIVE, /* cloud filter is active */ -}; - -/* Driver state. The order of these is important! 
*/ -enum i40evf_state_t { - __I40EVF_STARTUP, /* driver loaded, probe complete */ - __I40EVF_REMOVE, /* driver is being unloaded */ - __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ - __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ - __I40EVF_INIT_SW, /* got resources, setting up structs */ - __I40EVF_RESETTING, /* in reset */ - /* Below here, watchdog is running */ - __I40EVF_DOWN, /* ready, can be opened */ - __I40EVF_DOWN_PENDING, /* descending, waiting for watchdog */ - __I40EVF_TESTING, /* in ethtool self-test */ - __I40EVF_RUNNING, /* opened, working */ -}; - -enum i40evf_critical_section_t { - __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ - __I40EVF_IN_CLIENT_TASK, - __I40EVF_IN_REMOVE_TASK, /* device being removed */ -}; - -#define I40EVF_CLOUD_FIELD_OMAC 0x01 -#define I40EVF_CLOUD_FIELD_IMAC 0x02 -#define I40EVF_CLOUD_FIELD_IVLAN 0x04 -#define I40EVF_CLOUD_FIELD_TEN_ID 0x08 -#define I40EVF_CLOUD_FIELD_IIP 0x10 - -#define I40EVF_CF_FLAGS_OMAC I40EVF_CLOUD_FIELD_OMAC -#define I40EVF_CF_FLAGS_IMAC I40EVF_CLOUD_FIELD_IMAC -#define I40EVF_CF_FLAGS_IMAC_IVLAN (I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_IVLAN) -#define I40EVF_CF_FLAGS_IMAC_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_TEN_ID) -#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC (I40EVF_CLOUD_FIELD_OMAC |\ - I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_TEN_ID) -#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_IVLAN |\ - I40EVF_CLOUD_FIELD_TEN_ID) -#define I40EVF_CF_FLAGS_IIP I40E_CLOUD_FIELD_IIP - -/* bookkeeping of cloud filters */ -struct i40evf_cloud_filter { - enum i40evf_cloud_filter_state_t state; - struct list_head list; - struct virtchnl_filter f; - unsigned long cookie; - bool del; /* filter needs to be deleted */ - bool add; /* filter needs to be added */ -}; - -/* board specific private data structure */ -struct i40evf_adapter { - struct timer_list watchdog_timer; - struct work_struct reset_task; - struct work_struct adminq_task; - struct delayed_work client_task; - struct delayed_work init_task; - wait_queue_head_t down_waitqueue; - struct i40e_q_vector *q_vectors; - struct list_head vlan_filter_list; - struct list_head mac_filter_list; - /* Lock to protect accesses to MAC and VLAN lists */ - spinlock_t mac_vlan_list_lock; - char misc_vector_name[IFNAMSIZ + 9]; - int num_active_queues; - int num_req_queues; - - /* TX */ - struct i40e_ring *tx_rings; - u32 tx_timeout_count; - u32 tx_desc_count; - - /* RX */ - struct i40e_ring *rx_rings; - u64 hw_csum_rx_error; - u32 rx_desc_count; - int num_msix_vectors; - int num_iwarp_msix; - int iwarp_base_vector; - u32 client_pending; - struct i40e_client_instance *cinst; - struct msix_entry *msix_entries; - - u32 flags; -#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_PF_COMMS_FAILED BIT(3) -#define I40EVF_FLAG_RESET_PENDING BIT(4) -#define I40EVF_FLAG_RESET_NEEDED BIT(5) -#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) -#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(8) -#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) -#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) -#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) -#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) -#define I40EVF_FLAG_PROMISC_ON BIT(13) -#define I40EVF_FLAG_ALLMULTI_ON BIT(14) -#define I40EVF_FLAG_LEGACY_RX BIT(15) -#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16) -#define I40EVF_FLAG_QUEUES_DISABLED BIT(17) -/* duplicates for common code */ -#define I40E_FLAG_DCB_ENABLED 0 -#define I40E_FLAG_RX_CSUM_ENABLED 
I40EVF_FLAG_RX_CSUM_ENABLED -#define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX - /* flags for admin queue service task */ - u32 aq_required; -#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0) -#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1) -#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) -#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) -#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) -#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) -#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) -#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) -#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ -#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) -/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ -#define I40EVF_FLAG_AQ_GET_HENA BIT(11) -#define I40EVF_FLAG_AQ_SET_HENA BIT(12) -#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13) -#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14) -#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15) -#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16) -#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) -#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) -#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) -#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) -#define I40EVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) -#define I40EVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) -#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) -#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) - - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - struct i40e_hw hw; /* defined in i40e_type.h */ - - enum i40evf_state_t state; - unsigned long crit_section; - - struct work_struct watchdog_task; - bool netdev_registered; - bool link_up; - enum virtchnl_link_speed link_speed; - enum virtchnl_ops current_op; -#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ - (_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_IWARP : \ - 0) -#define CLIENT_ENABLED(_a) ((_a)->cinst) -/* RSS by the PF should be preferred over RSS via other methods. */ -#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_RSS_PF) -#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_RSS_AQ) -#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \ - (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ - VIRTCHNL_VF_OFFLOAD_RSS_PF))) -#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_VLAN) - struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ - struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ - struct virtchnl_version_info pf_version; -#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ - ((_a)->pf_version.minor == 1)) - u16 msg_enable; - struct i40e_eth_stats current_stats; - struct i40e_vsi vsi; - u32 aq_wait_count; - /* RSS stuff */ - u64 hena; - u16 rss_key_size; - u16 rss_lut_size; - u8 *rss_key; - u8 *rss_lut; - /* ADQ related members */ - struct i40evf_channel_config ch_config; - u8 num_tc; - struct list_head cloud_filter_list; - /* lock to protest access to the cloud filter list */ - spinlock_t cloud_filter_list_lock; - u16 num_cloud_filters; -}; - - -/* Ethtool Private Flags */ - -/* lan device */ -struct i40e_device { - struct list_head list; - struct i40evf_adapter *vf; -}; - -/* needed by i40evf_ethtool.c */ -extern char i40evf_driver_name[]; -extern const char i40evf_driver_version[]; - -int i40evf_up(struct i40evf_adapter *adapter); -void i40evf_down(struct i40evf_adapter *adapter); -int i40evf_process_config(struct i40evf_adapter *adapter); -void i40evf_schedule_reset(struct i40evf_adapter *adapter); -void i40evf_reset(struct i40evf_adapter *adapter); -void i40evf_set_ethtool_ops(struct net_device *netdev); -void i40evf_update_stats(struct i40evf_adapter *adapter); -void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter); -int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter); -void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask); -void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter); -void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter); - -void i40e_napi_add_all(struct i40evf_adapter *adapter); -void i40e_napi_del_all(struct i40evf_adapter *adapter); - -int i40evf_send_api_ver(struct i40evf_adapter *adapter); -int i40evf_verify_api_ver(struct i40evf_adapter *adapter); -int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter); -int i40evf_get_vf_config(struct i40evf_adapter *adapter); -void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush); -void i40evf_configure_queues(struct i40evf_adapter *adapter); -void i40evf_deconfigure_queues(struct i40evf_adapter *adapter); -void i40evf_enable_queues(struct i40evf_adapter *adapter); -void i40evf_disable_queues(struct i40evf_adapter *adapter); -void i40evf_map_queues(struct i40evf_adapter *adapter); -int i40evf_request_queues(struct i40evf_adapter *adapter, int num); -void i40evf_add_ether_addrs(struct i40evf_adapter *adapter); -void i40evf_del_ether_addrs(struct i40evf_adapter *adapter); -void i40evf_add_vlans(struct i40evf_adapter *adapter); -void i40evf_del_vlans(struct i40evf_adapter *adapter); -void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); -void i40evf_request_stats(struct i40evf_adapter *adapter); -void i40evf_request_reset(struct i40evf_adapter *adapter); -void i40evf_get_hena(struct i40evf_adapter *adapter); -void i40evf_set_hena(struct i40evf_adapter *adapter); -void i40evf_set_rss_key(struct i40evf_adapter *adapter); -void i40evf_set_rss_lut(struct i40evf_adapter *adapter); -void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter); -void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter); -void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, - enum virtchnl_ops v_opcode, - i40e_status v_retval, u8 *msg, u16 msglen); -int i40evf_config_rss(struct i40evf_adapter *adapter); -int i40evf_lan_add_device(struct i40evf_adapter *adapter); -int i40evf_lan_del_device(struct i40evf_adapter *adapter); 
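The aq_required bits defined above act as a to-do list for the driver's admin-queue service task: each bit corresponds to one of the virtchnl request helpers declared in this header. The sketch below is illustrative only (the function name is invented, and the real task additionally handles locking, reset state and ordering); it shows roughly how pending bits translate into those calls:

/* Illustrative sketch, not part of this patch: drain adapter->aq_required
 * by issuing one virtchnl request per pass.  The bit-clearing shown here is
 * simplified; the driver's own helpers do their own bookkeeping.
 */
static void i40evf_example_process_aq(struct i40evf_adapter *adapter)
{
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
		i40evf_send_vf_config_msg(adapter);
		return;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
		i40evf_add_ether_addrs(adapter);
		return;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
		i40evf_configure_queues(adapter);
		return;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
		i40evf_enable_queues(adapter);
	}
}

Handling one request per pass is consistent with the single current_op field tracked in struct i40evf_adapter.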
-void i40evf_client_subtask(struct i40evf_adapter *adapter); -void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len); -void i40evf_notify_client_l2_params(struct i40e_vsi *vsi); -void i40evf_notify_client_open(struct i40e_vsi *vsi); -void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset); -void i40evf_enable_channels(struct i40evf_adapter *adapter); -void i40evf_disable_channels(struct i40evf_adapter *adapter); -void i40evf_add_cloud_filter(struct i40evf_adapter *adapter); -void i40evf_del_cloud_filter(struct i40evf_adapter *adapter); -#endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c deleted file mode 100644 index 69efe0aec76a..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ /dev/null @@ -1,820 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -/* ethtool support for i40evf */ -#include "i40evf.h" - -#include <linux/uaccess.h> - -struct i40evf_stats { - char stat_string[ETH_GSTRING_LEN]; - int stat_offset; -}; - -#define I40EVF_STAT(_name, _stat) { \ - .stat_string = _name, \ - .stat_offset = offsetof(struct i40evf_adapter, _stat) \ -} - -/* All stats are u64, so we don't need to track the size of the field. */ -static const struct i40evf_stats i40evf_gstrings_stats[] = { - I40EVF_STAT("rx_bytes", current_stats.rx_bytes), - I40EVF_STAT("rx_unicast", current_stats.rx_unicast), - I40EVF_STAT("rx_multicast", current_stats.rx_multicast), - I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast), - I40EVF_STAT("rx_discards", current_stats.rx_discards), - I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), - I40EVF_STAT("tx_bytes", current_stats.tx_bytes), - I40EVF_STAT("tx_unicast", current_stats.tx_unicast), - I40EVF_STAT("tx_multicast", current_stats.tx_multicast), - I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast), - I40EVF_STAT("tx_discards", current_stats.tx_discards), - I40EVF_STAT("tx_errors", current_stats.tx_errors), -}; - -#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) -#define I40EVF_QUEUE_STATS_LEN(_dev) \ - (((struct i40evf_adapter *)\ - netdev_priv(_dev))->num_active_queues \ - * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) -#define I40EVF_STATS_LEN(_dev) \ - (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) - -/* For now we have one and only one private flag and it is only defined - * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead - * of leaving all this code sitting around empty we will strip it unless - * our one private flag is actually available. - */ -struct i40evf_priv_flags { - char flag_string[ETH_GSTRING_LEN]; - u32 flag; - bool read_only; -}; - -#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \ - .flag_string = _name, \ - .flag = _flag, \ - .read_only = _read_only, \ -} - -static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = { - I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0), -}; - -#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags) - -/** - * i40evf_get_link_ksettings - Get Link Speed and Duplex settings - * @netdev: network interface device structure - * @cmd: ethtool command - * - * Reports speed/duplex settings. Because this is a VF, we don't know what - * kind of link we really have, so we fake it. 
- **/ -static int i40evf_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - ethtool_link_ksettings_zero_link_mode(cmd, supported); - cmd->base.autoneg = AUTONEG_DISABLE; - cmd->base.port = PORT_NONE; - /* Set speed and duplex */ - switch (adapter->link_speed) { - case I40E_LINK_SPEED_40GB: - cmd->base.speed = SPEED_40000; - break; - case I40E_LINK_SPEED_25GB: -#ifdef SPEED_25000 - cmd->base.speed = SPEED_25000; -#else - netdev_info(netdev, - "Speed is 25G, display not supported by this version of ethtool.\n"); -#endif - break; - case I40E_LINK_SPEED_20GB: - cmd->base.speed = SPEED_20000; - break; - case I40E_LINK_SPEED_10GB: - cmd->base.speed = SPEED_10000; - break; - case I40E_LINK_SPEED_1GB: - cmd->base.speed = SPEED_1000; - break; - case I40E_LINK_SPEED_100MB: - cmd->base.speed = SPEED_100; - break; - default: - break; - } - cmd->base.duplex = DUPLEX_FULL; - - return 0; -} - -/** - * i40evf_get_sset_count - Get length of string set - * @netdev: network interface device structure - * @sset: id of string set - * - * Reports size of string table. This driver only supports - * strings for statistics. - **/ -static int i40evf_get_sset_count(struct net_device *netdev, int sset) -{ - if (sset == ETH_SS_STATS) - return I40EVF_STATS_LEN(netdev); - else if (sset == ETH_SS_PRIV_FLAGS) - return I40EVF_PRIV_FLAGS_STR_LEN; - else - return -EINVAL; -} - -/** - * i40evf_get_ethtool_stats - report device statistics - * @netdev: network interface device structure - * @stats: ethtool statistics structure - * @data: pointer to data buffer - * - * All statistics are added to the data buffer as an array of u64. - **/ -static void i40evf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - unsigned int i, j; - char *p; - - for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { - p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; - data[i] = *(u64 *)p; - } - for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->tx_rings[j].stats.packets; - data[i++] = adapter->tx_rings[j].stats.bytes; - } - for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->rx_rings[j].stats.packets; - data[i++] = adapter->rx_rings[j].stats.bytes; - } -} - -/** - * i40evf_get_strings - Get string set - * @netdev: network interface device structure - * @sset: id of string set - * @data: buffer for string data - * - * Builds stats string table. 
- **/ -static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u8 *p = data; - int i; - - if (sset == ETH_SS_STATS) { - for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) { - memcpy(p, i40evf_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_active_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_active_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); - p += ETH_GSTRING_LEN; - } - } else if (sset == ETH_SS_PRIV_FLAGS) { - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40evf_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } - } -} - -/** - * i40evf_get_priv_flags - report device private flags - * @netdev: network interface device structure - * - * The get string set count and the string set should be matched for each - * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags - * array. - * - * Returns a u32 bitmap of flags. - **/ -static u32 i40evf_get_priv_flags(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u32 i, ret_flags = 0; - - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - const struct i40evf_priv_flags *priv_flags; - - priv_flags = &i40evf_gstrings_priv_flags[i]; - - if (priv_flags->flag & adapter->flags) - ret_flags |= BIT(i); - } - - return ret_flags; -} - -/** - * i40evf_set_priv_flags - set private flags - * @netdev: network interface device structure - * @flags: bit flags to be set - **/ -static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u32 orig_flags, new_flags, changed_flags; - u32 i; - - orig_flags = READ_ONCE(adapter->flags); - new_flags = orig_flags; - - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - const struct i40evf_priv_flags *priv_flags; - - priv_flags = &i40evf_gstrings_priv_flags[i]; - - if (flags & BIT(i)) - new_flags |= priv_flags->flag; - else - new_flags &= ~(priv_flags->flag); - - if (priv_flags->read_only && - ((orig_flags ^ new_flags) & ~BIT(i))) - return -EOPNOTSUPP; - } - - /* Before we finalize any flag changes, any checks which we need to - * perform to determine if the new flags will be supported should go - * here... - */ - - /* Compare and exchange the new flags into place. If we failed, that - * is if cmpxchg returns anything but the old value, this means - * something else must have modified the flags variable since we - * copied it. We'll just punt with an error and log something in the - * message buffer. - */ - if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) { - dev_warn(&adapter->pdev->dev, - "Unable to update adapter->flags as it was modified by another thread...\n"); - return -EAGAIN; - } - - changed_flags = orig_flags ^ new_flags; - - /* Process any additional changes needed as a result of flag changes. - * The changed_flags value reflects the list of bits that were changed - * in the code above. 
- */ - - /* issue a reset to force legacy-rx change to take effect */ - if (changed_flags & I40EVF_FLAG_LEGACY_RX) { - if (netif_running(netdev)) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); - } - } - - return 0; -} - -/** - * i40evf_get_msglevel - Get debug message level - * @netdev: network interface device structure - * - * Returns current debug message level. - **/ -static u32 i40evf_get_msglevel(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - return adapter->msg_enable; -} - -/** - * i40evf_set_msglevel - Set debug message level - * @netdev: network interface device structure - * @data: message level - * - * Set current debug message level. Higher values cause the driver to - * be noisier. - **/ -static void i40evf_set_msglevel(struct net_device *netdev, u32 data) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - if (I40E_DEBUG_USER & data) - adapter->hw.debug_mask = data; - adapter->msg_enable = data; -} - -/** - * i40evf_get_drvinfo - Get driver info - * @netdev: network interface device structure - * @drvinfo: ethool driver info structure - * - * Returns information about the driver and device for display to the user. - **/ -static void i40evf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - strlcpy(drvinfo->driver, i40evf_driver_name, 32); - strlcpy(drvinfo->version, i40evf_driver_version, 32); - strlcpy(drvinfo->fw_version, "N/A", 4); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; -} - -/** - * i40evf_get_ringparam - Get ring parameters - * @netdev: network interface device structure - * @ring: ethtool ringparam structure - * - * Returns current ring parameters. TX and RX rings are reported separately, - * but the number of rings is not reported. - **/ -static void i40evf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - ring->rx_max_pending = I40EVF_MAX_RXD; - ring->tx_max_pending = I40EVF_MAX_TXD; - ring->rx_pending = adapter->rx_desc_count; - ring->tx_pending = adapter->tx_desc_count; -} - -/** - * i40evf_set_ringparam - Set ring parameters - * @netdev: network interface device structure - * @ring: ethtool ringparam structure - * - * Sets ring parameters. TX and RX rings are controlled separately, but the - * number of rings is not specified, so all rings get the same settings. 
- **/ -static int i40evf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u32 new_rx_count, new_tx_count; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - new_tx_count = clamp_t(u32, ring->tx_pending, - I40EVF_MIN_TXD, - I40EVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); - - new_rx_count = clamp_t(u32, ring->rx_pending, - I40EVF_MIN_RXD, - I40EVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); - - /* if nothing to do return success */ - if ((new_tx_count == adapter->tx_desc_count) && - (new_rx_count == adapter->rx_desc_count)) - return 0; - - adapter->tx_desc_count = new_tx_count; - adapter->rx_desc_count = new_rx_count; - - if (netif_running(netdev)) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); - } - - return 0; -} - -/** - * __i40evf_get_coalesce - get per-queue coalesce settings - * @netdev: the netdev to check - * @ec: ethtool coalesce data structure - * @queue: which queue to pick - * - * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs - * are per queue. If queue is <0 then we default to queue 0 as the - * representative value. - **/ -static int __i40evf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec, - int queue) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - struct i40e_ring *rx_ring, *tx_ring; - - ec->tx_max_coalesced_frames = vsi->work_limit; - ec->rx_max_coalesced_frames = vsi->work_limit; - - /* Rx and Tx usecs per queue value. If user doesn't specify the - * queue, return queue 0's value to represent. - */ - if (queue < 0) - queue = 0; - else if (queue >= adapter->num_active_queues) - return -EINVAL; - - rx_ring = &adapter->rx_rings[queue]; - tx_ring = &adapter->tx_rings[queue]; - - if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) - ec->use_adaptive_rx_coalesce = 1; - - if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) - ec->use_adaptive_tx_coalesce = 1; - - ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; - ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; - - return 0; -} - -/** - * i40evf_get_coalesce - Get interrupt coalescing settings - * @netdev: network interface device structure - * @ec: ethtool coalesce structure - * - * Returns current coalescing settings. This is referred to elsewhere in the - * driver as Interrupt Throttle Rate, as this is how the hardware describes - * this functionality. Note that if per-queue settings have been modified this - * only represents the settings of queue 0. - **/ -static int i40evf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - return __i40evf_get_coalesce(netdev, ec, -1); -} - -/** - * i40evf_get_per_queue_coalesce - get coalesce values for specific queue - * @netdev: netdev to read - * @ec: coalesce settings from ethtool - * @queue: the queue to read - * - * Read specific queue's coalesce settings. - **/ -static int i40evf_get_per_queue_coalesce(struct net_device *netdev, - u32 queue, - struct ethtool_coalesce *ec) -{ - return __i40evf_get_coalesce(netdev, ec, queue); -} - -/** - * i40evf_set_itr_per_queue - set ITR values for specific queue - * @adapter: the VF adapter struct to set values for - * @ec: coalesce settings from ethtool - * @queue: the queue to modify - * - * Change the ITR settings for a specific queue. 
- **/ -static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter, - struct ethtool_coalesce *ec, - int queue) -{ - struct i40e_ring *rx_ring = &adapter->rx_rings[queue]; - struct i40e_ring *tx_ring = &adapter->tx_rings[queue]; - struct i40e_q_vector *q_vector; - - rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); - tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); - - rx_ring->itr_setting |= I40E_ITR_DYNAMIC; - if (!ec->use_adaptive_rx_coalesce) - rx_ring->itr_setting ^= I40E_ITR_DYNAMIC; - - tx_ring->itr_setting |= I40E_ITR_DYNAMIC; - if (!ec->use_adaptive_tx_coalesce) - tx_ring->itr_setting ^= I40E_ITR_DYNAMIC; - - q_vector = rx_ring->q_vector; - q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); - - q_vector = tx_ring->q_vector; - q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); - - /* The interrupt handler itself will take care of programming - * the Tx and Rx ITR values based on the values we have entered - * into the q_vector, no need to write the values now. - */ -} - -/** - * __i40evf_set_coalesce - set coalesce settings for particular queue - * @netdev: the netdev to change - * @ec: ethtool coalesce settings - * @queue: the queue to change - * - * Sets the coalesce settings for a particular queue. - **/ -static int __i40evf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec, - int queue) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - int i; - - if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) - vsi->work_limit = ec->tx_max_coalesced_frames_irq; - - if (ec->rx_coalesce_usecs == 0) { - if (ec->use_adaptive_rx_coalesce) - netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) || - (ec->rx_coalesce_usecs > I40E_MAX_ITR)) { - netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); - return -EINVAL; - } - - else - if (ec->tx_coalesce_usecs == 0) { - if (ec->use_adaptive_tx_coalesce) - netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); - } else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) || - (ec->tx_coalesce_usecs > I40E_MAX_ITR)) { - netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); - return -EINVAL; - } - - /* Rx and Tx usecs has per queue value. If user doesn't specify the - * queue, apply to all queues. - */ - if (queue < 0) { - for (i = 0; i < adapter->num_active_queues; i++) - i40evf_set_itr_per_queue(adapter, ec, i); - } else if (queue < adapter->num_active_queues) { - i40evf_set_itr_per_queue(adapter, ec, queue); - } else { - netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", - adapter->num_active_queues - 1); - return -EINVAL; - } - - return 0; -} - -/** - * i40evf_set_coalesce - Set interrupt coalescing settings - * @netdev: network interface device structure - * @ec: ethtool coalesce structure - * - * Change current coalescing settings for every queue. - **/ -static int i40evf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - return __i40evf_set_coalesce(netdev, ec, -1); -} - -/** - * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings - * @netdev: the netdev to change - * @ec: ethtool's coalesce settings - * @queue: the queue to modify - * - * Modifies a specific queue's coalesce settings. 
- */ -static int i40evf_set_per_queue_coalesce(struct net_device *netdev, - u32 queue, - struct ethtool_coalesce *ec) -{ - return __i40evf_set_coalesce(netdev, ec, queue); -} - -/** - * i40evf_get_rxnfc - command to get RX flow classification rules - * @netdev: network interface device structure - * @cmd: ethtool rxnfc command - * @rule_locs: pointer to store rule locations - * - * Returns Success if the command is supported. - **/ -static int i40evf_get_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *cmd, - u32 *rule_locs) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_active_queues; - ret = 0; - break; - case ETHTOOL_GRXFH: - netdev_info(netdev, - "RSS hash info is not available to vf, use pf.\n"); - break; - default: - break; - } - - return ret; -} -/** - * i40evf_get_channels: get the number of channels supported by the device - * @netdev: network interface device structure - * @ch: channel information structure - * - * For the purposes of our device, we only use combined channels, i.e. a tx/rx - * queue pair. Report one extra channel to match our "other" MSI-X vector. - **/ -static void i40evf_get_channels(struct net_device *netdev, - struct ethtool_channels *ch) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - /* Report maximum channels */ - ch->max_combined = I40EVF_MAX_REQ_QUEUES; - - ch->max_other = NONQ_VECS; - ch->other_count = NONQ_VECS; - - ch->combined_count = adapter->num_active_queues; -} - -/** - * i40evf_set_channels: set the new channel count - * @netdev: network interface device structure - * @ch: channel information structure - * - * Negotiate a new number of channels with the PF then do a reset. During - * reset we'll realloc queues and fix the RSS table. Returns 0 on success, - * negative on failure. - **/ -static int i40evf_set_channels(struct net_device *netdev, - struct ethtool_channels *ch) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int num_req = ch->combined_count; - - if (num_req != adapter->num_active_queues && - !(adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { - dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n"); - return -EINVAL; - } - - if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && - adapter->num_tc) { - dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); - return -EINVAL; - } - - /* All of these should have already been checked by ethtool before this - * even gets to us, but just to be sure. - */ - if (num_req <= 0 || num_req > I40EVF_MAX_REQ_QUEUES) - return -EINVAL; - - if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) - return -EINVAL; - - adapter->num_req_queues = num_req; - return i40evf_request_queues(adapter, num_req); -} - -/** - * i40evf_get_rxfh_key_size - get the RSS hash key size - * @netdev: network interface device structure - * - * Returns the table size. - **/ -static u32 i40evf_get_rxfh_key_size(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - return adapter->rss_key_size; -} - -/** - * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size - * @netdev: network interface device structure - * - * Returns the table size. 
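For context on the RSS handlers that follow: the indirection table exposed through get_rxfh/set_rxfh is simply an array in which each entry names a receive queue, and a packet's hash selects an entry. The sketch below is a generic illustration of that mapping and is not part of the patch; the hash value, table size, and queue count are arbitrary and do not reflect the device's actual hashing.

#include <stdint.h>
#include <stdio.h>

#define DEMO_LUT_SIZE	64u

static void demo_fill_lut(uint8_t *lut, unsigned int lut_size,
			  unsigned int num_queues)
{
	unsigned int i;

	/* Spread hash buckets round-robin across the active queues. */
	for (i = 0; i < lut_size; i++)
		lut[i] = (uint8_t)(i % num_queues);
}

int main(void)
{
	uint8_t lut[DEMO_LUT_SIZE];
	uint32_t hash = 0x9e3779b9;	/* arbitrary example hash value */

	demo_fill_lut(lut, DEMO_LUT_SIZE, 4);
	printf("hash 0x%08x -> queue %u\n",
	       (unsigned int)hash, (unsigned int)lut[hash % DEMO_LUT_SIZE]);
	return 0;
}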
- **/ -static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - return adapter->rss_lut_size; -} - -/** - * i40evf_get_rxfh - get the rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function in use - * - * Reads the indirection table directly from the hardware. Always returns 0. - **/ -static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u16 i; - - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (!indir) - return 0; - - memcpy(key, adapter->rss_key, adapter->rss_key_size); - - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; - - return 0; -} - -/** - * i40evf_set_rxfh - set the rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function to use - * - * Returns -EINVAL if the table specifies an inavlid queue id, otherwise - * returns 0 after programming the table. - **/ -static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u16 i; - - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) - return -EOPNOTSUPP; - if (!indir) - return 0; - - if (key) { - memcpy(adapter->rss_key, key, adapter->rss_key_size); - } - - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = (u8)(indir[i]); - - return i40evf_config_rss(adapter); -} - -static const struct ethtool_ops i40evf_ethtool_ops = { - .get_drvinfo = i40evf_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_ringparam = i40evf_get_ringparam, - .set_ringparam = i40evf_set_ringparam, - .get_strings = i40evf_get_strings, - .get_ethtool_stats = i40evf_get_ethtool_stats, - .get_sset_count = i40evf_get_sset_count, - .get_priv_flags = i40evf_get_priv_flags, - .set_priv_flags = i40evf_set_priv_flags, - .get_msglevel = i40evf_get_msglevel, - .set_msglevel = i40evf_set_msglevel, - .get_coalesce = i40evf_get_coalesce, - .set_coalesce = i40evf_set_coalesce, - .get_per_queue_coalesce = i40evf_get_per_queue_coalesce, - .set_per_queue_coalesce = i40evf_set_per_queue_coalesce, - .get_rxnfc = i40evf_get_rxnfc, - .get_rxfh_indir_size = i40evf_get_rxfh_indir_size, - .get_rxfh = i40evf_get_rxfh, - .set_rxfh = i40evf_set_rxfh, - .get_channels = i40evf_get_channels, - .set_channels = i40evf_set_channels, - .get_rxfh_key_size = i40evf_get_rxfh_key_size, - .get_link_ksettings = i40evf_get_link_ksettings, -}; - -/** - * i40evf_set_ethtool_ops - Initialize ethtool ops struct - * @netdev: network interface device structure - * - * Sets ethtool ops struct in our netdev so that ethtool can call - * our functions. - **/ -void i40evf_set_ethtool_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &i40evf_ethtool_ops; -} diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile new file mode 100644 index 000000000000..9cbb5743ed12 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2013 - 2018 Intel Corporation. 
+# +# Makefile for the Intel(R) Ethernet Adaptive Virtual Function (iavf) +# driver +# +# + +ccflags-y += -I$(src) +subdir-ccflags-y += -I$(src) + +obj-$(CONFIG_IAVF) += iavf.o + +iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \ + iavf_txrx.o iavf_common.o i40e_adminq.o iavf_client.o diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/i40e_adminq.c index 21a0dbf6ccf6..fca1ecfd9f71 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.c @@ -1,21 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#include "i40e_status.h" -#include "i40e_type.h" -#include "i40e_register.h" +#include "iavf_status.h" +#include "iavf_type.h" +#include "iavf_register.h" #include "i40e_adminq.h" -#include "i40e_prototype.h" - -/** - * i40e_is_nvm_update_op - return true if this is an NVM update operation - * @desc: API request descriptor - **/ -static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) -{ - return (desc->opcode == i40e_aqc_opc_nvm_erase) || - (desc->opcode == i40e_aqc_opc_nvm_update); -} +#include "iavf_prototype.h" /** * i40e_adminq_init_regs - Initialize AdminQ registers @@ -23,44 +13,42 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) * * This assumes the alloc_asq and alloc_arq functions have already been called **/ -static void i40e_adminq_init_regs(struct i40e_hw *hw) +static void i40e_adminq_init_regs(struct iavf_hw *hw) { /* set head and tail registers in our local struct */ - if (i40e_is_vf(hw)) { - hw->aq.asq.tail = I40E_VF_ATQT1; - hw->aq.asq.head = I40E_VF_ATQH1; - hw->aq.asq.len = I40E_VF_ATQLEN1; - hw->aq.asq.bal = I40E_VF_ATQBAL1; - hw->aq.asq.bah = I40E_VF_ATQBAH1; - hw->aq.arq.tail = I40E_VF_ARQT1; - hw->aq.arq.head = I40E_VF_ARQH1; - hw->aq.arq.len = I40E_VF_ARQLEN1; - hw->aq.arq.bal = I40E_VF_ARQBAL1; - hw->aq.arq.bah = I40E_VF_ARQBAH1; - } + hw->aq.asq.tail = IAVF_VF_ATQT1; + hw->aq.asq.head = IAVF_VF_ATQH1; + hw->aq.asq.len = IAVF_VF_ATQLEN1; + hw->aq.asq.bal = IAVF_VF_ATQBAL1; + hw->aq.asq.bah = IAVF_VF_ATQBAH1; + hw->aq.arq.tail = IAVF_VF_ARQT1; + hw->aq.arq.head = IAVF_VF_ARQH1; + hw->aq.arq.len = IAVF_VF_ARQLEN1; + hw->aq.arq.bal = IAVF_VF_ARQBAL1; + hw->aq.arq.bah = IAVF_VF_ARQBAH1; } /** * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw) { - i40e_status ret_code; + iavf_status ret_code; - ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, + ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, i40e_mem_atq_ring, (hw->aq.num_asq_entries * sizeof(struct i40e_aq_desc)), - I40E_ADMINQ_DESC_ALIGNMENT); + IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) return ret_code; - ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, + ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, (hw->aq.num_asq_entries * sizeof(struct i40e_asq_cmd_details))); if (ret_code) { - i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); return ret_code; } @@ -71,15 +59,15 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw 
*hw) { - i40e_status ret_code; + iavf_status ret_code; - ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, + ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, i40e_mem_arq_ring, (hw->aq.num_arq_entries * sizeof(struct i40e_aq_desc)), - I40E_ADMINQ_DESC_ALIGNMENT); + IAVF_ADMINQ_DESC_ALIGNMENT); return ret_code; } @@ -91,9 +79,9 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) * This assumes the posted send buffers have already been cleaned * and de-allocated **/ -static void i40e_free_adminq_asq(struct i40e_hw *hw) +static void i40e_free_adminq_asq(struct iavf_hw *hw) { - i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); } /** @@ -103,20 +91,20 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw) * This assumes the posted receive buffers have already been cleaned * and de-allocated **/ -static void i40e_free_adminq_arq(struct i40e_hw *hw) +static void i40e_free_adminq_arq(struct iavf_hw *hw) { - i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf); } /** * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) +static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw) { - i40e_status ret_code; struct i40e_aq_desc *desc; - struct i40e_dma_mem *bi; + struct iavf_dma_mem *bi; + iavf_status ret_code; int i; /* We'll be allocating the buffer info memory first, then we can @@ -124,24 +112,25 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) */ /* buffer_info structures do not need alignment */ - ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, - (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + (hw->aq.num_arq_entries * + sizeof(struct iavf_dma_mem))); if (ret_code) goto alloc_arq_bufs; - hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; + hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_arq_entries; i++) { bi = &hw->aq.arq.r.arq_bi[i]; - ret_code = i40e_allocate_dma_mem(hw, bi, + ret_code = iavf_allocate_dma_mem(hw, bi, i40e_mem_arq_buf, hw->aq.arq_buf_size, - I40E_ADMINQ_DESC_ALIGNMENT); + IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) goto unwind_alloc_arq_bufs; /* now configure the descriptors for use */ - desc = I40E_ADMINQ_DESC(hw->aq.arq, i); + desc = IAVF_ADMINQ_DESC(hw->aq.arq, i); desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) @@ -169,8 +158,8 @@ unwind_alloc_arq_bufs: /* don't try to free the one that failed... 
*/ i--; for (; i >= 0; i--) - i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); - i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + iavf_free_virt_mem(hw, &hw->aq.arq.dma_head); return ret_code; } @@ -179,26 +168,27 @@ unwind_alloc_arq_bufs: * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) +static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw) { - i40e_status ret_code; - struct i40e_dma_mem *bi; + struct iavf_dma_mem *bi; + iavf_status ret_code; int i; /* No mapped memory needed yet, just the buffer info structures */ - ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, - (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * + sizeof(struct iavf_dma_mem))); if (ret_code) goto alloc_asq_bufs; - hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; + hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_asq_entries; i++) { bi = &hw->aq.asq.r.asq_bi[i]; - ret_code = i40e_allocate_dma_mem(hw, bi, + ret_code = iavf_allocate_dma_mem(hw, bi, i40e_mem_asq_buf, hw->aq.asq_buf_size, - I40E_ADMINQ_DESC_ALIGNMENT); + IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) goto unwind_alloc_asq_bufs; } @@ -209,8 +199,8 @@ unwind_alloc_asq_bufs: /* don't try to free the one that failed... */ i--; for (; i >= 0; i--) - i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); - i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + iavf_free_virt_mem(hw, &hw->aq.asq.dma_head); return ret_code; } @@ -219,42 +209,42 @@ unwind_alloc_asq_bufs: * i40e_free_arq_bufs - Free receive queue buffer info elements * @hw: pointer to the hardware structure **/ -static void i40e_free_arq_bufs(struct i40e_hw *hw) +static void i40e_free_arq_bufs(struct iavf_hw *hw) { int i; /* free descriptors */ for (i = 0; i < hw->aq.num_arq_entries; i++) - i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); /* free the descriptor memory */ - i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf); /* free the dma header */ - i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + iavf_free_virt_mem(hw, &hw->aq.arq.dma_head); } /** * i40e_free_asq_bufs - Free send queue buffer info elements * @hw: pointer to the hardware structure **/ -static void i40e_free_asq_bufs(struct i40e_hw *hw) +static void i40e_free_asq_bufs(struct iavf_hw *hw) { int i; /* only unmap if the address is non-NULL */ for (i = 0; i < hw->aq.num_asq_entries; i++) if (hw->aq.asq.r.asq_bi[i].pa) - i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); /* free the buffer info list */ - i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf); /* free the descriptor memory */ - i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); /* free the dma header */ - i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + iavf_free_virt_mem(hw, &hw->aq.asq.dma_head); } /** @@ -263,9 +253,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw) * * Configure base address and length registers for the transmit queue **/ -static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) +static iavf_status 
i40e_config_asq_regs(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -274,7 +264,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) /* set starting point */ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | - I40E_VF_ATQLEN1_ATQENABLE_MASK)); + IAVF_VF_ATQLEN1_ATQENABLE_MASK)); wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); @@ -292,9 +282,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) * * Configure base address and length registers for the receive (event queue) **/ -static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) +static iavf_status i40e_config_arq_regs(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -303,7 +293,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) /* set starting point */ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | - I40E_VF_ARQLEN1_ARQENABLE_MASK)); + IAVF_VF_ARQLEN1_ARQENABLE_MASK)); wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); @@ -331,9 +321,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static i40e_status i40e_init_asq(struct i40e_hw *hw) +static iavf_status i40e_init_asq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; if (hw->aq.asq.count > 0) { /* queue already initialized */ @@ -390,9 +380,9 @@ init_adminq_exit: * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static i40e_status i40e_init_arq(struct i40e_hw *hw) +static iavf_status i40e_init_arq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; if (hw->aq.arq.count > 0) { /* queue already initialized */ @@ -442,9 +432,9 @@ init_adminq_exit: * * The main shutdown routine for the Admin Send Queue **/ -static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) +static iavf_status i40e_shutdown_asq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; mutex_lock(&hw->aq.asq_mutex); @@ -476,9 +466,9 @@ shutdown_asq_out: * * The main shutdown routine for the Admin Receive Queue **/ -static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) +static iavf_status i40e_shutdown_arq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; mutex_lock(&hw->aq.arq_mutex); @@ -505,7 +495,7 @@ shutdown_arq_out: } /** - * i40evf_init_adminq - main initialization routine for Admin Queue + * iavf_init_adminq - main initialization routine for Admin Queue * @hw: pointer to the hardware structure * * Prior to calling this function, drivers *MUST* set the following fields @@ -515,9 +505,9 @@ shutdown_arq_out: * - hw->aq.arq_buf_size * - hw->aq.asq_buf_size **/ -i40e_status i40evf_init_adminq(struct i40e_hw *hw) +iavf_status iavf_init_adminq(struct iavf_hw *hw) { - i40e_status ret_code; + iavf_status ret_code; /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || @@ -556,22 +546,19 @@ init_adminq_exit: } /** - * i40evf_shutdown_adminq - shutdown routine for the Admin Queue + * iavf_shutdown_adminq - shutdown routine for the Admin Queue * @hw: pointer to the hardware structure **/ -i40e_status i40evf_shutdown_adminq(struct 
i40e_hw *hw) +iavf_status iavf_shutdown_adminq(struct iavf_hw *hw) { - i40e_status ret_code = 0; + iavf_status ret_code = 0; - if (i40evf_check_asq_alive(hw)) - i40evf_aq_queue_shutdown(hw, true); + if (iavf_check_asq_alive(hw)) + iavf_aq_queue_shutdown(hw, true); i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - if (hw->nvm_buff.va) - i40e_free_virt_mem(hw, &hw->nvm_buff); - return ret_code; } @@ -581,18 +568,18 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) * * returns the number of free desc **/ -static u16 i40e_clean_asq(struct i40e_hw *hw) +static u16 i40e_clean_asq(struct iavf_hw *hw) { - struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct iavf_adminq_ring *asq = &hw->aq.asq; struct i40e_asq_cmd_details *details; u16 ntc = asq->next_to_clean; struct i40e_aq_desc desc_cb; struct i40e_aq_desc *desc; - desc = I40E_ADMINQ_DESC(*asq, ntc); + desc = IAVF_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { @@ -607,33 +594,32 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) ntc++; if (ntc == asq->count) ntc = 0; - desc = I40E_ADMINQ_DESC(*asq, ntc); + desc = IAVF_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); } asq->next_to_clean = ntc; - return I40E_DESC_UNUSED(asq); + return IAVF_DESC_UNUSED(asq); } /** - * i40evf_asq_done - check if FW has processed the Admin Send Queue + * iavf_asq_done - check if FW has processed the Admin Send Queue * @hw: pointer to the hw struct * * Returns true if the firmware has processed all descriptors on the * admin send queue. Returns false if there are still requests pending. **/ -bool i40evf_asq_done(struct i40e_hw *hw) +bool iavf_asq_done(struct iavf_hw *hw) { /* AQ designers suggest use of head for better * timing reliability than DD bit */ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; - } /** - * i40evf_asq_send_command - send command to Admin Queue + * iavf_asq_send_command - send command to Admin Queue * @hw: pointer to the hw struct * @desc: prefilled descriptor describing the command (non DMA mem) * @buff: buffer to use for indirect commands @@ -643,24 +629,23 @@ bool i40evf_asq_done(struct i40e_hw *hw) * This is the main send command driver routine for the Admin Queue send * queue. 
It runs the queue, cleans the queue, etc **/ -i40e_status i40evf_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details) +iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) { - i40e_status status = 0; - struct i40e_dma_mem *dma_buff = NULL; + struct iavf_dma_mem *dma_buff = NULL; struct i40e_asq_cmd_details *details; struct i40e_aq_desc *desc_on_ring; bool cmd_completed = false; + iavf_status status = 0; u16 retval = 0; u32 val = 0; mutex_lock(&hw->aq.asq_mutex); if (hw->aq.asq.count == 0) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Admin queue not initialized.\n"); status = I40E_ERR_QUEUE_EMPTY; goto asq_send_command_error; @@ -670,7 +655,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, val = rd32(hw, hw->aq.asq.head); if (val >= hw->aq.num_asq_entries) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); status = I40E_ERR_QUEUE_EMPTY; goto asq_send_command_error; @@ -699,8 +684,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, desc->flags |= cpu_to_le16(details->flags_ena); if (buff_size > hw->aq.asq_buf_size) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQTX: Invalid buffer size: %d.\n", buff_size); status = I40E_ERR_INVALID_SIZE; @@ -708,8 +693,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, } if (details->postpone && !details->async) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQTX: Async flag not set along with postpone flag"); status = I40E_ERR_PARAM; goto asq_send_command_error; @@ -723,22 +708,22 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, * in case of asynchronous completions */ if (i40e_clean_asq(hw) == 0) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQTX: Error queue is full.\n"); status = I40E_ERR_ADMIN_QUEUE_FULL; goto asq_send_command_error; } /* initialize the temp desc pointer with the right desc */ - desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); + desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); /* if the desc is available copy the temp desc to the right place */ *desc_on_ring = *desc; /* if buff is not NULL assume indirect command */ - if (buff != NULL) { - dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + if (buff) { + dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]; /* copy the user buff into the respective DMA buff */ memcpy(dma_buff->va, buff, buff_size); desc_on_ring->datalen = cpu_to_le16(buff_size); @@ -753,9 +738,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, } /* bump the tail */ - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, - buff, buff_size); + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); + iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); (hw->aq.asq.next_to_use)++; if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; @@ -772,7 +757,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, /* AQ designers suggest use of head for better * timing reliability than DD bit */ - if (i40evf_asq_done(hw)) + if 
(iavf_asq_done(hw)) break; udelay(50); total_delay += 50; @@ -780,14 +765,14 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, } /* if ready, copy the desc back to temp */ - if (i40evf_asq_done(hw)) { + if (iavf_asq_done(hw)) { *desc = *desc_on_ring; - if (buff != NULL) + if (buff) memcpy(buff, dma_buff->va, buff_size); retval = le16_to_cpu(desc->retval); if (retval != 0) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQTX: Command completed with error 0x%X.\n", retval); @@ -804,10 +789,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; } - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, - buff_size); + iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); /* save writeback aq if requested */ if (details->wb_desc) @@ -816,12 +800,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { - if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: AQ Critical error.\n"); status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; } else { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Writeback timeout.\n"); status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; } @@ -833,14 +817,13 @@ asq_send_command_error: } /** - * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function + * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) * @opcode: the opcode can be used to decide which flags to turn off or on * * Fill the desc with default values **/ -void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, - u16 opcode) +void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode) { /* zero out the desc */ memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); @@ -849,7 +832,7 @@ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, } /** - * i40evf_clean_arq_element + * iavf_clean_arq_element * @hw: pointer to the hw struct * @e: event info from the receive descriptor, includes any buffers * @pending: number of events that could be left to process @@ -858,14 +841,14 @@ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, * the contents through e. 
It can also return how many events are * left to process through 'pending' **/ -i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, - struct i40e_arq_event_info *e, - u16 *pending) +iavf_status iavf_clean_arq_element(struct iavf_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) { - i40e_status ret_code = 0; u16 ntc = hw->aq.arq.next_to_clean; struct i40e_aq_desc *desc; - struct i40e_dma_mem *bi; + iavf_status ret_code = 0; + struct iavf_dma_mem *bi; u16 desc_idx; u16 datalen; u16 flags; @@ -878,14 +861,14 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, mutex_lock(&hw->aq.arq_mutex); if (hw->aq.arq.count == 0) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: Admin queue not initialized.\n"); ret_code = I40E_ERR_QUEUE_EMPTY; goto clean_arq_element_err; } /* set next_to_use to head */ - ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; + ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; @@ -893,7 +876,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, } /* now clean the next descriptor */ - desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); + desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc); desc_idx = ntc; hw->aq.arq_last_status = @@ -901,8 +884,8 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, flags = le16_to_cpu(desc->flags); if (flags & I40E_AQ_FLAG_ERR) { ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, + iavf_debug(hw, + IAVF_DEBUG_AQ_MESSAGE, "AQRX: Event received with error 0x%X.\n", hw->aq.arq_last_status); } @@ -910,13 +893,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, e->desc = *desc; datalen = le16_to_cpu(desc->datalen); e->msg_len = min(datalen, e->buf_len); - if (e->msg_buf != NULL && (e->msg_len != 0)) + if (e->msg_buf && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, e->msg_len); - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, - hw->aq.arq_buf_size); + iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); + iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message @@ -943,7 +926,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, clean_arq_element_out: /* Set pending if needed, unlock and return */ - if (pending != NULL) + if (pending) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); clean_arq_element_err: @@ -951,17 +934,3 @@ clean_arq_element_err: return ret_code; } - -void i40evf_resume_aq(struct i40e_hw *hw) -{ - /* Registers are reset after PF reset */ - hw->aq.asq.next_to_use = 0; - hw->aq.asq.next_to_clean = 0; - - i40e_config_asq_regs(hw); - - hw->aq.arq.next_to_use = 0; - hw->aq.arq.next_to_clean = 0; - - i40e_config_arq_regs(hw); -} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/iavf/i40e_adminq.h index 1f264b9b6805..ee983889eab0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.h @@ -1,26 +1,26 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ -#ifndef _I40E_ADMINQ_H_ -#define _I40E_ADMINQ_H_ +#ifndef _IAVF_ADMINQ_H_ +#define _IAVF_ADMINQ_H_ -#include "i40e_osdep.h" -#include "i40e_status.h" +#include "iavf_osdep.h" +#include "iavf_status.h" #include "i40e_adminq_cmd.h" -#define I40E_ADMINQ_DESC(R, i) \ +#define IAVF_ADMINQ_DESC(R, i) \ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) -#define I40E_ADMINQ_DESC_ALIGNMENT 4096 +#define IAVF_ADMINQ_DESC_ALIGNMENT 4096 -struct i40e_adminq_ring { - struct i40e_virt_mem dma_head; /* space for dma structures */ - struct i40e_dma_mem desc_buf; /* descriptor ring memory */ - struct i40e_virt_mem cmd_buf; /* command buffer memory */ +struct iavf_adminq_ring { + struct iavf_virt_mem dma_head; /* space for dma structures */ + struct iavf_dma_mem desc_buf; /* descriptor ring memory */ + struct iavf_virt_mem cmd_buf; /* command buffer memory */ union { - struct i40e_dma_mem *asq_bi; - struct i40e_dma_mem *arq_bi; + struct iavf_dma_mem *asq_bi; + struct iavf_dma_mem *arq_bi; } r; u16 count; /* Number of descriptors */ @@ -61,9 +61,9 @@ struct i40e_arq_event_info { }; /* Admin Queue information */ -struct i40e_adminq_info { - struct i40e_adminq_ring arq; /* receive queue */ - struct i40e_adminq_ring asq; /* send queue */ +struct iavf_adminq_info { + struct iavf_adminq_ring arq; /* receive queue */ + struct iavf_adminq_ring asq; /* send queue */ u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ u16 num_arq_entries; /* receive queue depth */ u16 num_asq_entries; /* send queue depth */ @@ -130,7 +130,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) #define I40E_AQ_LARGE_BUF 512 #define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */ -void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, - u16 opcode); +void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); -#endif /* _I40E_ADMINQ_H_ */ +#endif /* _IAVF_ADMINQ_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h new file mode 100644 index 000000000000..af4f94a6541e --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h @@ -0,0 +1,530 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. Do not change the names in this file to IAVF + * because this file should be diff-able against the i40e version, even + * though many parts have been removed in this VF version. + * + * This file needs to comply with the Linux Kernel coding style. + */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR_X722 0x0005 +#define I40E_FW_API_VERSION_MINOR_X710 0x0007 + +#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ + I40E_FW_API_VERSION_MINOR_X710 : \ + I40E_FW_API_VERSION_MINOR_X722) + +/* API version 1.7 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + /* Proxy commands */ + 
i40e_aqc_opc_set_proxy_config = 0x0104, + i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* WoL commands */ + i40e_aqc_opc_set_wol_filter = 0x0120, + i40e_aqc_opc_get_wake_reason = 0x0121, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + i40e_aqc_opc_clear_wol_switch_filters = 0x025E, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* Dynamic Device Personalization */ + i40e_aqc_opc_write_personalization_profile = 0x0270, + i40e_aqc_opc_get_personalization_profile_list = 0x0271, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_set_dcb_parameters = 0x0303, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + 
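A side note on the descriptor flag bits defined a little earlier in this header: buffer-backed (indirect) commands set I40E_AQ_FLAG_BUF, and buffers larger than I40E_AQ_LARGE_BUF (512 bytes) additionally set I40E_AQ_FLAG_LB, as seen in the receive-buffer setup hunk of i40e_adminq.c above. The helper below is an illustrative standalone sketch that mirrors those constants locally; it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define DEMO_AQ_FLAG_LB		(1u << 9)	/* same bit position as I40E_AQ_FLAG_LB */
#define DEMO_AQ_FLAG_BUF	(1u << 12)	/* same bit position as I40E_AQ_FLAG_BUF */
#define DEMO_AQ_LARGE_BUF	512u

static uint16_t demo_indirect_flags(uint16_t buf_size)
{
	uint16_t flags = DEMO_AQ_FLAG_BUF;

	/* Buffers above 512 bytes also mark the descriptor as "large buffer". */
	if (buf_size > DEMO_AQ_LARGE_BUF)
		flags |= DEMO_AQ_FLAG_LB;
	return flags;
}

int main(void)
{
	printf("small buffer flags: 0x%04x\n", (unsigned int)demo_indirect_flags(256));
	printf("large buffer flags: 0x%04x\n", (unsigned int)demo_indirect_flags(4096));
	return 0;
}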
i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, + i40e_aqc_opc_set_phy_register = 0x0628, + i40e_aqc_opc_get_phy_register = 0x0629, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, + i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. 
+ */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 
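The I40E_CHECK_STRUCT_LEN macro above enforces a struct size at build time: if the size is wrong, the enum initializer divides by zero in a constant expression and compilation fails. Below is a toy reproduction of the same trick using a hypothetical 16-byte command struct (C11 _Static_assert would be the modern equivalent); it is an editorial illustration, not part of the patch.

#include <stdint.h>

#define DEMO_CHECK_STRUCT_LEN(n, X) \
	enum demo_static_assert_enum_##X \
	{ demo_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

struct demo_direct_cmd {
	uint32_t param0;
	uint32_t param1;
	uint32_t addr_high;
	uint32_t addr_low;
};

/* Compiles only while the struct stays exactly 16 bytes, like the AQ commands. */
DEMO_CHECK_STRUCT_LEN(16, demo_direct_cmd);

int main(void)
{
	return 0;
}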
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 +#define I40E_LINK_SPEED_25GB_SHIFT 0x6 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), + I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), +}; + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { 
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ + BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h new file mode 100644 index 000000000000..272d76b733aa --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _IAVF_H_ +#define _IAVF_H_ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/ipv6.h> +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/gfp.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/socket.h> +#include <linux/jiffies.h> +#include <net/ip6_checksum.h> +#include <net/pkt_cls.h> +#include <net/udp.h> +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_mirred.h> + +#include "iavf_type.h" +#include <linux/avf/virtchnl.h> +#include "iavf_txrx.h" + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 +#define PFX "iavf: " + +/* VSI state flags shared with common code */ +enum iavf_vsi_state_t { + __IAVF_VSI_DOWN, + /* This must be last as it determines the size of the BITMAP */ + __IAVF_VSI_STATE_SIZE__, +}; + +/* dummy struct to make common code less painful */ +struct iavf_vsi { + struct iavf_adapter *back; + struct net_device *netdev; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 seid; + u16 id; + DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); + int base_vector; + u16 work_limit; + u16 qs_handle; + void *priv; /* client driver data reference. */ +}; + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#define IAVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IAVF_DEFAULT_TXD 512 +#define IAVF_DEFAULT_RXD 512 +#define IAVF_MAX_TXD 4096 +#define IAVF_MIN_TXD 64 +#define IAVF_MAX_RXD 4096 +#define IAVF_MIN_RXD 64 +#define IAVF_REQ_DESCRIPTOR_MULTIPLE 32 +#define IAVF_MAX_AQ_BUF_SIZE 4096 +#define IAVF_AQ_LEN 32 +#define IAVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i])) +#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i])) +#define IAVF_TX_CTXTDESC(R, i) \ + (&(((struct iavf_tx_context_desc *)((R)->desc))[i])) +#define IAVF_MAX_REQ_QUEUES 4 + +#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4) +#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4) +#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct iavf_q_vector { + struct iavf_adapter *adapter; + struct iavf_vsi *vsi; + struct napi_struct napi; + struct iavf_ring_container rx; + struct iavf_ring_container tx; + u32 ring_mask; + u8 itr_countdown; /* when 0 should adjust adaptive ITR */ + u8 num_ringpairs; /* total number of ring pairs in vector */ + u16 v_idx; /* index in the vsi->q_vector array. */ + u16 reg_idx; /* register index of the interrupt */ + char name[IFNAMSIZ + 15]; + bool arm_wb_state; + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; +}; + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. The lowest value + * supported by all of the i40e hardware is 8. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +#define IAVF_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define OTHER_VECTOR 1 +#define NONQ_VECS (OTHER_VECTOR) + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) + +#define IAVF_QUEUE_END_OF_LIST 0x7FF +#define IAVF_FREE_VECTOR 0x7FFF +struct iavf_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; + bool remove; /* filter needs to be removed */ + bool add; /* filter needs to be added */ +}; + +struct iavf_vlan_filter { + struct list_head list; + u16 vlan; + bool remove; /* filter needs to be removed */ + bool add; /* filter needs to be added */ +}; + +#define IAVF_MAX_TRAFFIC_CLASS 4 +/* State of traffic class creation */ +enum iavf_tc_state_t { + __IAVF_TC_INVALID, /* no traffic class, default state */ + __IAVF_TC_RUNNING, /* traffic classes have been created */ +}; + +/* channel info */ +struct iavf_channel_config { + struct virtchnl_channel_info ch_info[IAVF_MAX_TRAFFIC_CLASS]; + enum iavf_tc_state_t state; + u8 total_qps; +}; + +/* State of cloud filter */ +enum iavf_cloud_filter_state_t { + __IAVF_CF_INVALID, /* cloud filter not added */ + __IAVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */ + __IAVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */ + __IAVF_CF_ACTIVE, /* cloud filter is active */ +}; + +/* Driver state. The order of these is important! 
*/ +enum iavf_state_t { + __IAVF_STARTUP, /* driver loaded, probe complete */ + __IAVF_REMOVE, /* driver is being unloaded */ + __IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ + __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __IAVF_INIT_SW, /* got resources, setting up structs */ + __IAVF_RESETTING, /* in reset */ + /* Below here, watchdog is running */ + __IAVF_DOWN, /* ready, can be opened */ + __IAVF_DOWN_PENDING, /* descending, waiting for watchdog */ + __IAVF_TESTING, /* in ethtool self-test */ + __IAVF_RUNNING, /* opened, working */ +}; + +enum iavf_critical_section_t { + __IAVF_IN_CRITICAL_TASK, /* cannot be interrupted */ + __IAVF_IN_CLIENT_TASK, + __IAVF_IN_REMOVE_TASK, /* device being removed */ +}; + +#define IAVF_CLOUD_FIELD_OMAC 0x01 +#define IAVF_CLOUD_FIELD_IMAC 0x02 +#define IAVF_CLOUD_FIELD_IVLAN 0x04 +#define IAVF_CLOUD_FIELD_TEN_ID 0x08 +#define IAVF_CLOUD_FIELD_IIP 0x10 + +#define IAVF_CF_FLAGS_OMAC IAVF_CLOUD_FIELD_OMAC +#define IAVF_CF_FLAGS_IMAC IAVF_CLOUD_FIELD_IMAC +#define IAVF_CF_FLAGS_IMAC_IVLAN (IAVF_CLOUD_FIELD_IMAC |\ + IAVF_CLOUD_FIELD_IVLAN) +#define IAVF_CF_FLAGS_IMAC_TEN_ID (IAVF_CLOUD_FIELD_IMAC |\ + IAVF_CLOUD_FIELD_TEN_ID) +#define IAVF_CF_FLAGS_OMAC_TEN_ID_IMAC (IAVF_CLOUD_FIELD_OMAC |\ + IAVF_CLOUD_FIELD_IMAC |\ + IAVF_CLOUD_FIELD_TEN_ID) +#define IAVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (IAVF_CLOUD_FIELD_IMAC |\ + IAVF_CLOUD_FIELD_IVLAN |\ + IAVF_CLOUD_FIELD_TEN_ID) +#define IAVF_CF_FLAGS_IIP IAVF_CLOUD_FIELD_IIP + +/* bookkeeping of cloud filters */ +struct iavf_cloud_filter { + enum iavf_cloud_filter_state_t state; + struct list_head list; + struct virtchnl_filter f; + unsigned long cookie; + bool del; /* filter needs to be deleted */ + bool add; /* filter needs to be added */ +}; + +/* board specific private data structure */ +struct iavf_adapter { + struct timer_list watchdog_timer; + struct work_struct reset_task; + struct work_struct adminq_task; + struct delayed_work client_task; + struct delayed_work init_task; + wait_queue_head_t down_waitqueue; + struct iavf_q_vector *q_vectors; + struct list_head vlan_filter_list; + struct list_head mac_filter_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + char misc_vector_name[IFNAMSIZ + 9]; + int num_active_queues; + int num_req_queues; + + /* TX */ + struct iavf_ring *tx_rings; + u32 tx_timeout_count; + u32 tx_desc_count; + + /* RX */ + struct iavf_ring *rx_rings; + u64 hw_csum_rx_error; + u32 rx_desc_count; + int num_msix_vectors; + int num_iwarp_msix; + int iwarp_base_vector; + u32 client_pending; + struct i40e_client_instance *cinst; + struct msix_entry *msix_entries; + + u32 flags; +#define IAVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define IAVF_FLAG_PF_COMMS_FAILED BIT(3) +#define IAVF_FLAG_RESET_PENDING BIT(4) +#define IAVF_FLAG_RESET_NEEDED BIT(5) +#define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) +#define IAVF_FLAG_ADDR_SET_BY_PF BIT(8) +#define IAVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) +#define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) +#define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) +#define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) +#define IAVF_FLAG_PROMISC_ON BIT(13) +#define IAVF_FLAG_ALLMULTI_ON BIT(14) +#define IAVF_FLAG_LEGACY_RX BIT(15) +#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) +#define IAVF_FLAG_QUEUES_DISABLED BIT(17) +/* duplicates for common code */ +#define IAVF_FLAG_DCB_ENABLED 0 + /* flags for admin queue service task */ + u32 aq_required; +#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0) +#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT(1) 
+#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) +#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) +#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) +#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) +#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) +#define IAVF_FLAG_AQ_MAP_VECTORS BIT(7) +#define IAVF_FLAG_AQ_HANDLE_RESET BIT(8) +#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ +#define IAVF_FLAG_AQ_GET_CONFIG BIT(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define IAVF_FLAG_AQ_GET_HENA BIT(11) +#define IAVF_FLAG_AQ_SET_HENA BIT(12) +#define IAVF_FLAG_AQ_SET_RSS_KEY BIT(13) +#define IAVF_FLAG_AQ_SET_RSS_LUT BIT(14) +#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT(15) +#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT(16) +#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) +#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) +#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) +#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) +#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) +#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) +#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) +#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + struct iavf_hw hw; /* defined in iavf_type.h */ + + enum iavf_state_t state; + unsigned long crit_section; + + struct work_struct watchdog_task; + bool netdev_registered; + bool link_up; + enum virtchnl_link_speed link_speed; + enum virtchnl_ops current_op; +#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ + (_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_IWARP : \ + 0) +#define CLIENT_ENABLED(_a) ((_a)->cinst) +/* RSS by the PF should be preferred over RSS via other methods. */ +#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) +#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_RSS_AQ) +#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \ + (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF))) +#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_VLAN) + struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ + struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + struct virtchnl_version_info pf_version; +#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ + ((_a)->pf_version.minor == 1)) + u16 msg_enable; + struct iavf_eth_stats current_stats; + struct iavf_vsi vsi; + u32 aq_wait_count; + /* RSS stuff */ + u64 hena; + u16 rss_key_size; + u16 rss_lut_size; + u8 *rss_key; + u8 *rss_lut; + /* ADQ related members */ + struct iavf_channel_config ch_config; + u8 num_tc; + struct list_head cloud_filter_list; + /* lock to protect access to the cloud filter list */ + spinlock_t cloud_filter_list_lock; + u16 num_cloud_filters; +}; + + +/* Ethtool Private Flags */ + +/* lan device, used by client interface */ +struct i40e_device { + struct list_head list; + struct iavf_adapter *vf; +}; + +/* needed by iavf_ethtool.c */ +extern char iavf_driver_name[]; +extern const char iavf_driver_version[]; + +int iavf_up(struct iavf_adapter *adapter); +void iavf_down(struct iavf_adapter *adapter); +int iavf_process_config(struct iavf_adapter *adapter); +void iavf_schedule_reset(struct iavf_adapter *adapter); +void iavf_reset(struct iavf_adapter *adapter); +void iavf_set_ethtool_ops(struct net_device *netdev); +void iavf_update_stats(struct iavf_adapter *adapter); +void iavf_reset_interrupt_capability(struct iavf_adapter *adapter); +int iavf_init_interrupt_scheme(struct iavf_adapter *adapter); +void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask); +void iavf_free_all_tx_resources(struct iavf_adapter *adapter); +void iavf_free_all_rx_resources(struct iavf_adapter *adapter); + +void iavf_napi_add_all(struct iavf_adapter *adapter); +void iavf_napi_del_all(struct iavf_adapter *adapter); + +int iavf_send_api_ver(struct iavf_adapter *adapter); +int iavf_verify_api_ver(struct iavf_adapter *adapter); +int iavf_send_vf_config_msg(struct iavf_adapter *adapter); +int iavf_get_vf_config(struct iavf_adapter *adapter); +void iavf_irq_enable(struct iavf_adapter *adapter, bool flush); +void iavf_configure_queues(struct iavf_adapter *adapter); +void iavf_deconfigure_queues(struct iavf_adapter *adapter); +void iavf_enable_queues(struct iavf_adapter *adapter); +void iavf_disable_queues(struct iavf_adapter *adapter); +void iavf_map_queues(struct iavf_adapter *adapter); +int iavf_request_queues(struct iavf_adapter *adapter, int num); +void iavf_add_ether_addrs(struct iavf_adapter *adapter); +void iavf_del_ether_addrs(struct iavf_adapter *adapter); +void iavf_add_vlans(struct iavf_adapter *adapter); +void iavf_del_vlans(struct iavf_adapter *adapter); +void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags); +void iavf_request_stats(struct iavf_adapter *adapter); +void iavf_request_reset(struct iavf_adapter *adapter); +void iavf_get_hena(struct iavf_adapter *adapter); +void iavf_set_hena(struct iavf_adapter *adapter); +void iavf_set_rss_key(struct iavf_adapter *adapter); +void iavf_set_rss_lut(struct iavf_adapter *adapter); +void iavf_enable_vlan_stripping(struct iavf_adapter *adapter); +void iavf_disable_vlan_stripping(struct iavf_adapter *adapter); +void iavf_virtchnl_completion(struct iavf_adapter *adapter, + enum virtchnl_ops v_opcode, + iavf_status v_retval, u8 *msg, u16 msglen); +int iavf_config_rss(struct iavf_adapter *adapter); +int iavf_lan_add_device(struct iavf_adapter *adapter); +int iavf_lan_del_device(struct iavf_adapter *adapter); +void iavf_client_subtask(struct iavf_adapter *adapter); +void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len); +void 
iavf_notify_client_l2_params(struct iavf_vsi *vsi); +void iavf_notify_client_open(struct iavf_vsi *vsi); +void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset); +void iavf_enable_channels(struct iavf_adapter *adapter); +void iavf_disable_channels(struct iavf_adapter *adapter); +void iavf_add_cloud_filter(struct iavf_adapter *adapter); +void iavf_del_cloud_filter(struct iavf_adapter *adapter); +#endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_alloc.h b/drivers/net/ethernet/intel/iavf/iavf_alloc.h new file mode 100644 index 000000000000..bf2753146f30 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_alloc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _IAVF_ALLOC_H_ +#define _IAVF_ALLOC_H_ + +struct iavf_hw; + +/* Memory allocation types */ +enum iavf_memory_type { + iavf_mem_arq_buf = 0, /* ARQ indirect command buffer */ + iavf_mem_asq_buf = 1, + iavf_mem_atq_buf = 2, /* ATQ indirect command buffer */ + iavf_mem_arq_ring = 3, /* ARQ descriptor ring */ + iavf_mem_atq_ring = 4, /* ATQ descriptor ring */ + iavf_mem_pd = 5, /* Page Descriptor */ + iavf_mem_bp = 6, /* Backing Page - 4KB */ + iavf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ + iavf_mem_reserved +}; + +/* prototype for functions used for dynamic memory allocation */ +iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem, + enum iavf_memory_type type, + u64 size, u32 alignment); +iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem); +iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw, + struct iavf_virt_mem *mem, u32 size); +iavf_status iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem); + +#endif /* _IAVF_ALLOC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c index 3cc9d60d0d72..aea45364fd1c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c +++ b/drivers/net/ethernet/intel/iavf/iavf_client.c @@ -4,36 +4,36 @@ #include <linux/list.h> #include <linux/errno.h> -#include "i40evf.h" -#include "i40e_prototype.h" -#include "i40evf_client.h" +#include "iavf.h" +#include "iavf_prototype.h" +#include "iavf_client.h" static -const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR; +const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR; static struct i40e_client *vf_registered_client; -static LIST_HEAD(i40evf_devices); -static DEFINE_MUTEX(i40evf_device_mutex); +static LIST_HEAD(i40e_devices); +static DEFINE_MUTEX(iavf_device_mutex); -static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, - struct i40e_client *client, - u8 *msg, u16 len); +static u32 iavf_client_virtchnl_send(struct i40e_info *ldev, + struct i40e_client *client, + u8 *msg, u16 len); -static int i40evf_client_setup_qvlist(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_qvlist_info *qvlist_info); +static int iavf_client_setup_qvlist(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_qvlist_info *qvlist_info); -static struct i40e_ops i40evf_lan_ops = { - .virtchnl_send = i40evf_client_virtchnl_send, - .setup_qvlist = i40evf_client_setup_qvlist, +static struct i40e_ops iavf_lan_ops = { + .virtchnl_send = iavf_client_virtchnl_send, + .setup_qvlist = iavf_client_setup_qvlist, }; /** - * i40evf_client_get_params - retrieve relevant client parameters + * iavf_client_get_params - retrieve relevant client parameters * @vsi: VSI with parameters * 
@params: client param struct **/ static -void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) +void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params) { int i; @@ -41,21 +41,21 @@ void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) params->mtu = vsi->netdev->mtu; params->link_up = vsi->back->link_up; - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + for (i = 0; i < IAVF_MAX_USER_PRIORITY; i++) { params->qos.prio_qos[i].tc = 0; params->qos.prio_qos[i].qs_handle = vsi->qs_handle; } } /** - * i40evf_notify_client_message - call the client message receive callback + * iavf_notify_client_message - call the client message receive callback * @vsi: the VSI associated with this client * @msg: message buffer * @len: length of message * * If there is a client to this VSI, call the client **/ -void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len) +void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len) { struct i40e_client_instance *cinst; @@ -74,12 +74,12 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len) } /** - * i40evf_notify_client_l2_params - call the client notify callback + * iavf_notify_client_l2_params - call the client notify callback * @vsi: the VSI with l2 param changes * * If there is a client to this VSI, call the client **/ -void i40evf_notify_client_l2_params(struct i40e_vsi *vsi) +void iavf_notify_client_l2_params(struct iavf_vsi *vsi) { struct i40e_client_instance *cinst; struct i40e_params params; @@ -95,21 +95,21 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi) "Cannot locate client instance l2_param_change function\n"); return; } - i40evf_client_get_params(vsi, ¶ms); + iavf_client_get_params(vsi, ¶ms); cinst->lan_info.params = params; cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client, ¶ms); } /** - * i40evf_notify_client_open - call the client open callback + * iavf_notify_client_open - call the client open callback * @vsi: the VSI with netdev opened * * If there is a client to this netdev, call the client with open **/ -void i40evf_notify_client_open(struct i40e_vsi *vsi) +void iavf_notify_client_open(struct iavf_vsi *vsi) { - struct i40evf_adapter *adapter = vsi->back; + struct iavf_adapter *adapter = vsi->back; struct i40e_client_instance *cinst = adapter->cinst; int ret; @@ -127,22 +127,22 @@ void i40evf_notify_client_open(struct i40e_vsi *vsi) } /** - * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map + * iavf_client_release_qvlist - send a message to the PF to release iwarp qv map * @ldev: pointer to L2 context. 
* * Return 0 on success or < 0 on error **/ -static int i40evf_client_release_qvlist(struct i40e_info *ldev) +static int iavf_client_release_qvlist(struct i40e_info *ldev) { - struct i40evf_adapter *adapter = ldev->vf; - i40e_status err; + struct iavf_adapter *adapter = ldev->vf; + iavf_status err; if (adapter->aq_required) return -EAGAIN; - err = i40e_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, - I40E_SUCCESS, NULL, 0, NULL); + err = iavf_aq_send_msg_to_pf(&adapter->hw, + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, + I40E_SUCCESS, NULL, 0, NULL); if (err) dev_err(&adapter->pdev->dev, @@ -153,15 +153,15 @@ static int i40evf_client_release_qvlist(struct i40e_info *ldev) } /** - * i40evf_notify_client_close - call the client close callback + * iavf_notify_client_close - call the client close callback * @vsi: the VSI with netdev closed * @reset: true when close called due to reset pending * * If there is a client to this netdev, call the client with close **/ -void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset) +void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset) { - struct i40evf_adapter *adapter = vsi->back; + struct iavf_adapter *adapter = vsi->back; struct i40e_client_instance *cinst = adapter->cinst; if (!cinst || !cinst->client || !cinst->client->ops || @@ -171,21 +171,21 @@ void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset) return; } cinst->client->ops->close(&cinst->lan_info, cinst->client, reset); - i40evf_client_release_qvlist(&cinst->lan_info); + iavf_client_release_qvlist(&cinst->lan_info); clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); } /** - * i40evf_client_add_instance - add a client instance to the instance list + * iavf_client_add_instance - add a client instance to the instance list * @adapter: pointer to the board struct * * Returns cinst ptr on success, NULL on failure **/ static struct i40e_client_instance * -i40evf_client_add_instance(struct i40evf_adapter *adapter) +iavf_client_add_instance(struct iavf_adapter *adapter) { struct i40e_client_instance *cinst = NULL; - struct i40e_vsi *vsi = &adapter->vsi; + struct iavf_vsi *vsi = &adapter->vsi; struct netdev_hw_addr *mac = NULL; struct i40e_params params; @@ -207,11 +207,11 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter) cinst->lan_info.fid = 0; cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF; cinst->lan_info.hw_addr = adapter->hw.hw_addr; - cinst->lan_info.ops = &i40evf_lan_ops; - cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR; - cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR; - cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD; - i40evf_client_get_params(vsi, ¶ms); + cinst->lan_info.ops = &iavf_lan_ops; + cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR; + cinst->lan_info.version.minor = IAVF_CLIENT_VERSION_MINOR; + cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD; + iavf_client_get_params(vsi, ¶ms); cinst->lan_info.params = params; set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state); @@ -233,28 +233,28 @@ out: } /** - * i40evf_client_del_instance - removes a client instance from the list + * iavf_client_del_instance - removes a client instance from the list * @adapter: pointer to the board struct * **/ static -void i40evf_client_del_instance(struct i40evf_adapter *adapter) +void iavf_client_del_instance(struct iavf_adapter *adapter) { kfree(adapter->cinst); adapter->cinst = NULL; } /** - * i40evf_client_subtask - client maintenance work + * iavf_client_subtask - client maintenance work * 
@adapter: board private structure **/ -void i40evf_client_subtask(struct i40evf_adapter *adapter) +void iavf_client_subtask(struct iavf_adapter *adapter) { struct i40e_client *client = vf_registered_client; struct i40e_client_instance *cinst; int ret = 0; - if (adapter->state < __I40EVF_DOWN) + if (adapter->state < __IAVF_DOWN) return; /* first check client is registered */ @@ -262,7 +262,7 @@ void i40evf_client_subtask(struct i40evf_adapter *adapter) return; /* Add the client instance to the instance list */ - cinst = i40evf_client_add_instance(adapter); + cinst = iavf_client_add_instance(adapter); if (!cinst) return; @@ -279,23 +279,23 @@ void i40evf_client_subtask(struct i40evf_adapter *adapter) &cinst->state); else /* remove client instance */ - i40evf_client_del_instance(adapter); + iavf_client_del_instance(adapter); } } /** - * i40evf_lan_add_device - add a lan device struct to the list of lan devices + * iavf_lan_add_device - add a lan device struct to the list of lan devices * @adapter: pointer to the board struct * * Returns 0 on success or none 0 on error **/ -int i40evf_lan_add_device(struct i40evf_adapter *adapter) +int iavf_lan_add_device(struct iavf_adapter *adapter) { struct i40e_device *ldev; int ret = 0; - mutex_lock(&i40evf_device_mutex); - list_for_each_entry(ldev, &i40evf_devices, list) { + mutex_lock(&iavf_device_mutex); + list_for_each_entry(ldev, &i40e_devices, list) { if (ldev->vf == adapter) { ret = -EEXIST; goto out; @@ -308,7 +308,7 @@ int i40evf_lan_add_device(struct i40evf_adapter *adapter) } ldev->vf = adapter; INIT_LIST_HEAD(&ldev->list); - list_add(&ldev->list, &i40evf_devices); + list_add(&ldev->list, &i40e_devices); dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", adapter->hw.bus.bus_id, adapter->hw.bus.device, adapter->hw.bus.func); @@ -316,26 +316,26 @@ int i40evf_lan_add_device(struct i40evf_adapter *adapter) /* Since in some cases register may have happened before a device gets * added, we can schedule a subtask to go initiate the clients. 
*/ - adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; out: - mutex_unlock(&i40evf_device_mutex); + mutex_unlock(&iavf_device_mutex); return ret; } /** - * i40evf_lan_del_device - removes a lan device from the device list + * iavf_lan_del_device - removes a lan device from the device list * @adapter: pointer to the board struct * * Returns 0 on success or non-0 on error **/ -int i40evf_lan_del_device(struct i40evf_adapter *adapter) +int iavf_lan_del_device(struct iavf_adapter *adapter) { struct i40e_device *ldev, *tmp; int ret = -ENODEV; - mutex_lock(&i40evf_device_mutex); - list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) { + mutex_lock(&iavf_device_mutex); + list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) { if (ldev->vf == adapter) { dev_info(&adapter->pdev->dev, "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", @@ -348,23 +348,23 @@ int i40evf_lan_del_device(struct i40evf_adapter *adapter) } } - mutex_unlock(&i40evf_device_mutex); + mutex_unlock(&iavf_device_mutex); return ret; } /** - * i40evf_client_release - release client specific resources + * iavf_client_release - release client specific resources * @client: pointer to the registered client * **/ -static void i40evf_client_release(struct i40e_client *client) +static void iavf_client_release(struct i40e_client *client) { struct i40e_client_instance *cinst; struct i40e_device *ldev; - struct i40evf_adapter *adapter; + struct iavf_adapter *adapter; - mutex_lock(&i40evf_device_mutex); - list_for_each_entry(ldev, &i40evf_devices, list) { + mutex_lock(&iavf_device_mutex); + list_for_each_entry(ldev, &i40e_devices, list) { adapter = ldev->vf; cinst = adapter->cinst; if (!cinst) @@ -373,41 +373,41 @@ static void i40evf_client_release(struct i40e_client *client) if (client->ops && client->ops->close) client->ops->close(&cinst->lan_info, client, false); - i40evf_client_release_qvlist(&cinst->lan_info); + iavf_client_release_qvlist(&cinst->lan_info); clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); dev_warn(&adapter->pdev->dev, "Client %s instance closed\n", client->name); } /* delete the client instance */ - i40evf_client_del_instance(adapter); + iavf_client_del_instance(adapter); dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n", client->name); } - mutex_unlock(&i40evf_device_mutex); + mutex_unlock(&iavf_device_mutex); } /** - * i40evf_client_prepare - prepare client specific resources + * iavf_client_prepare - prepare client specific resources * @client: pointer to the registered client * **/ -static void i40evf_client_prepare(struct i40e_client *client) +static void iavf_client_prepare(struct i40e_client *client) { struct i40e_device *ldev; - struct i40evf_adapter *adapter; + struct iavf_adapter *adapter; - mutex_lock(&i40evf_device_mutex); - list_for_each_entry(ldev, &i40evf_devices, list) { + mutex_lock(&iavf_device_mutex); + list_for_each_entry(ldev, &i40e_devices, list) { adapter = ldev->vf; /* Signal the watchdog to service the client */ - adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - mutex_unlock(&i40evf_device_mutex); + mutex_unlock(&iavf_device_mutex); } /** - * i40evf_client_virtchnl_send - send a message to the PF instance + * iavf_client_virtchnl_send - send a message to the PF instance * @ldev: pointer to L2 context. * @client: Client pointer. 
* @msg: pointer to message buffer @@ -415,17 +415,17 @@ static void i40evf_client_prepare(struct i40e_client *client) * * Return 0 on success or < 0 on error **/ -static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, - struct i40e_client *client, - u8 *msg, u16 len) +static u32 iavf_client_virtchnl_send(struct i40e_info *ldev, + struct i40e_client *client, + u8 *msg, u16 len) { - struct i40evf_adapter *adapter = ldev->vf; - i40e_status err; + struct iavf_adapter *adapter = ldev->vf; + iavf_status err; if (adapter->aq_required) return -EAGAIN; - err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, + err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, I40E_SUCCESS, msg, len, NULL); if (err) dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", @@ -435,21 +435,21 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, } /** - * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map + * iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map * @ldev: pointer to L2 context. * @client: Client pointer. * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ -static int i40evf_client_setup_qvlist(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_qvlist_info *qvlist_info) +static int iavf_client_setup_qvlist(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_qvlist_info *qvlist_info) { struct virtchnl_iwarp_qvlist_info *v_qvlist_info; - struct i40evf_adapter *adapter = ldev->vf; + struct iavf_adapter *adapter = ldev->vf; struct i40e_qv_info *qv_info; - i40e_status err; + iavf_status err; u32 v_idx, i; u32 msg_size; @@ -474,9 +474,9 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev, (v_qvlist_info->num_vectors - 1)); adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); - err = i40e_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, - I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL); + err = iavf_aq_send_msg_to_pf(&adapter->hw, + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, I40E_SUCCESS, + (u8 *)v_qvlist_info, msg_size, NULL); if (err) { dev_err(&adapter->pdev->dev, @@ -499,12 +499,12 @@ out: } /** - * i40evf_register_client - Register a i40e client driver with the L2 driver + * iavf_register_client - Register a i40e client driver with the L2 driver * @client: pointer to the i40e_client struct * * Returns 0 on success or non-0 on error **/ -int i40evf_register_client(struct i40e_client *client) +int iavf_register_client(struct i40e_client *client) { int ret = 0; @@ -514,48 +514,48 @@ int i40evf_register_client(struct i40e_client *client) } if (strlen(client->name) == 0) { - pr_info("i40evf: Failed to register client with no name\n"); + pr_info("iavf: Failed to register client with no name\n"); ret = -EIO; goto out; } if (vf_registered_client) { - pr_info("i40evf: Client %s has already been registered!\n", + pr_info("iavf: Client %s has already been registered!\n", client->name); ret = -EEXIST; goto out; } - if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) || - (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) { - pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n", + if ((client->version.major != IAVF_CLIENT_VERSION_MAJOR) || + (client->version.minor != IAVF_CLIENT_VERSION_MINOR)) { + pr_info("iavf: Failed to register client %s due to mismatched client interface version\n", client->name); pr_info("Client is using version: %02d.%02d.%02d 
while LAN driver supports %s\n", client->version.major, client->version.minor, client->version.build, - i40evf_client_interface_version_str); + iavf_client_interface_version_str); ret = -EIO; goto out; } vf_registered_client = client; - i40evf_client_prepare(client); + iavf_client_prepare(client); - pr_info("i40evf: Registered client %s with return code %d\n", + pr_info("iavf: Registered client %s with return code %d\n", client->name, ret); out: return ret; } -EXPORT_SYMBOL(i40evf_register_client); +EXPORT_SYMBOL(iavf_register_client); /** - * i40evf_unregister_client - Unregister a i40e client driver with the L2 driver + * iavf_unregister_client - Unregister a i40e client driver with the L2 driver * @client: pointer to the i40e_client struct * * Returns 0 on success or non-0 on error **/ -int i40evf_unregister_client(struct i40e_client *client) +int iavf_unregister_client(struct i40e_client *client) { int ret = 0; @@ -563,17 +563,17 @@ int i40evf_unregister_client(struct i40e_client *client) * a close for each of the client instances that were opened. * client_release function is called to handle this. */ - i40evf_client_release(client); + iavf_client_release(client); if (vf_registered_client != client) { - pr_info("i40evf: Client %s has not been registered\n", + pr_info("iavf: Client %s has not been registered\n", client->name); ret = -ENODEV; goto out; } vf_registered_client = NULL; - pr_info("i40evf: Unregistered client %s\n", client->name); + pr_info("iavf: Unregistered client %s\n", client->name); out: return ret; } -EXPORT_SYMBOL(i40evf_unregister_client); +EXPORT_SYMBOL(iavf_unregister_client); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h index 5585f362048a..e216fc9dfd81 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h +++ b/drivers/net/ethernet/intel/iavf/iavf_client.h @@ -1,21 +1,21 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#ifndef _I40EVF_CLIENT_H_ -#define _I40EVF_CLIENT_H_ +#ifndef _IAVF_CLIENT_H_ +#define _IAVF_CLIENT_H_ -#define I40EVF_CLIENT_STR_LENGTH 10 +#define IAVF_CLIENT_STR_LENGTH 10 /* Client interface version should be updated anytime there is a change in the * existing APIs or data structures. */ -#define I40EVF_CLIENT_VERSION_MAJOR 0 -#define I40EVF_CLIENT_VERSION_MINOR 01 -#define I40EVF_CLIENT_VERSION_BUILD 00 -#define I40EVF_CLIENT_VERSION_STR \ - __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \ - __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \ - __stringify(I40EVF_CLIENT_VERSION_BUILD) +#define IAVF_CLIENT_VERSION_MAJOR 0 +#define IAVF_CLIENT_VERSION_MINOR 01 +#define IAVF_CLIENT_VERSION_BUILD 00 +#define IAVF_CLIENT_VERSION_STR \ + __stringify(IAVF_CLIENT_VERSION_MAJOR) "." \ + __stringify(IAVF_CLIENT_VERSION_MINOR) "." 
\ + __stringify(IAVF_CLIENT_VERSION_BUILD) struct i40e_client_version { u8 major; @@ -90,7 +90,7 @@ struct i40e_info { #define I40E_CLIENT_FTYPE_PF 0 #define I40E_CLIENT_FTYPE_VF 1 u8 ftype; /* function type, PF or VF */ - void *vf; /* cast to i40evf_adapter */ + void *vf; /* cast to iavf_adapter */ /* All L2 params that could change during the life span of the device * and needs to be communicated to the client when they change @@ -151,7 +151,7 @@ struct i40e_client_instance { struct i40e_client { struct list_head list; /* list of registered clients */ - char name[I40EVF_CLIENT_STR_LENGTH]; + char name[IAVF_CLIENT_STR_LENGTH]; struct i40e_client_version version; unsigned long state; /* client state */ atomic_t ref_cnt; /* Count of all the client devices of this kind */ @@ -164,6 +164,6 @@ struct i40e_client { }; /* used by clients */ -int i40evf_register_client(struct i40e_client *client); -int i40evf_unregister_client(struct i40e_client *client); -#endif /* _I40EVF_CLIENT_H_ */ +int iavf_register_client(struct i40e_client *client); +int iavf_unregister_client(struct i40e_client *client); +#endif /* _IAVF_CLIENT_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c new file mode 100644 index 000000000000..768369c89e77 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -0,0 +1,955 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#include "iavf_type.h" +#include "i40e_adminq.h" +#include "iavf_prototype.h" +#include <linux/avf/virtchnl.h> + +/** + * iavf_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. 
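For orientation, the client interface declared in iavf_client.h above is consumed by a separate offload module rather than by the LAN driver itself. A minimal sketch of such a consumer, under stated assumptions, might look as follows: only iavf_register_client()/iavf_unregister_client(), the IAVF_CLIENT_VERSION_* macros and the -EEXIST/-EIO failure paths appear in this patch; the module name, the ops wiring and the init/exit boilerplate are illustrative, and struct i40e_client_version is assumed to carry minor/build members alongside major, as the version-string macro implies.

/* Hypothetical client module built against iavf_client.h; everything
 * named "my_*" is an assumption, not part of this patch.
 */
static struct i40e_client my_iwarp_client = {
	/* "i40iw" is only an example name; it must fit IAVF_CLIENT_STR_LENGTH */
	.name = "i40iw",
	.version = {
		.major = IAVF_CLIENT_VERSION_MAJOR,
		.minor = IAVF_CLIENT_VERSION_MINOR,
		.build = IAVF_CLIENT_VERSION_BUILD,
	},
	/* .ops would point at the module's open/close/l2_param_change
	 * callbacks, which iavf_client.c drives from its subtask.
	 */
};

static int __init my_iwarp_client_init(void)
{
	/* -EEXIST if another client is already registered, -EIO on an
	 * empty name or a client interface version mismatch.
	 */
	return iavf_register_client(&my_iwarp_client);
}
module_init(my_iwarp_client_init);

static void __exit my_iwarp_client_exit(void)
{
	iavf_unregister_client(&my_iwarp_client);
}
module_exit(my_iwarp_client_exit);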
+ **/ +iavf_status iavf_set_mac_type(struct iavf_hw *hw) +{ + iavf_status status = 0; + + if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { + switch (hw->device_id) { + case IAVF_DEV_ID_X722_VF: + hw->mac.type = IAVF_MAC_X722_VF; + break; + case IAVF_DEV_ID_VF: + case IAVF_DEV_ID_VF_HV: + case IAVF_DEV_ID_ADAPTIVE_VF: + hw->mac.type = IAVF_MAC_VF; + break; + default: + hw->mac.type = IAVF_MAC_GENERIC; + break; + } + } else { + status = I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status); + return status; +} + +/** + * iavf_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return "I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * iavf_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err) +{ + switch (stat_err) { + case 0: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return "I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + 
return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return "I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return "I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return "I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return "I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return "I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: + return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + 
return hw->err_str; +} + +/** + * iavf_debug_aq + * @hw: debug mask related to admin queue + * @mask: debug mask + * @desc: pointer to admin queue descriptor + * @buffer: pointer to command buffer + * @buf_len: max length of buffer + * + * Dumps debug log about adminq command with descriptor contents. + **/ +void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, + void *buffer, u16 buf_len) +{ + struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u8 *buf = (u8 *)buffer; + + if ((!(mask & hw->debug_mask)) || !desc) + return; + + iavf_debug(hw, mask, + "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); + iavf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); + iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); + iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); + + if (buffer && aq_desc->datalen) { + u16 len = le16_to_cpu(aq_desc->datalen); + + iavf_debug(hw, mask, "AQ CMD Buffer:\n"); + if (buf_len < len) + len = buf_len; + /* write the full 16-byte chunks */ + if (hw->debug_mask & mask) { + char prefix[27]; + + snprintf(prefix, sizeof(prefix), + "iavf %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); + } + } +} + +/** + * iavf_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool iavf_check_asq_alive(struct iavf_hw *hw) +{ + if (hw->aq.asq.len) + return !!(rd32(hw, hw->aq.asq.len) & + IAVF_VF_ATQLEN1_ATQENABLE_MASK); + else + return false; +} + +/** + * iavf_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. 
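The liveness check above and the queue-shutdown command documented here are naturally used together on the teardown path: only ask the firmware to shut the admin queue down when the send queue is still enabled. A minimal sketch of that pairing, assuming the usual iavf headers; the helper name is invented, while the two iavf_* calls are the ones defined in this file.

/* Hypothetical teardown helper; "my_shutdown_adminq" is not part of the patch. */
static void my_shutdown_adminq(struct iavf_hw *hw, bool unloading)
{
	/* The ATQ enable bit is still set, so the firmware is listening. */
	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, unloading);
}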
+ **/ +iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + iavf_status status; + + iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * iavf_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + iavf_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + iavf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + iavf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * iavf_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * iavf_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * iavf_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +static +iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + iavf_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = + (struct 
i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + iavf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + iavf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + + status = iavf_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * iavf_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return iavf_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * iavf_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return iavf_aq_get_set_rss_key(hw, vsi_id, key, true); +} + +/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
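The two RSS admin-queue wrappers defined just above are meant to be used as a pair: program the hash key for a VSI, then its lookup table. A rough sketch of that sequence follows; the helper name, the reuse of adapter->rss_key/rss_lut as source buffers and adapter->vsi.id as the VSI index are assumptions inferred from the fields declared in iavf.h, not something this hunk spells out.

/* Sketch only; everything named "my_*" is an assumption. */
static iavf_status my_config_rss_aq(struct iavf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	iavf_status ret;

	/* Program the hash key for this VSI first ... */
	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, key);
	if (ret)
		return ret;

	/* ... then the lookup table; pf_lut == false selects the VSI table. */
	return iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				   adapter->rss_lut, adapter->rss_lut_size);
}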
+ * + * Typical work flow: + * + * IF NOT iavf_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF iavf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum iavf_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + IAVF_RX_PTYPE_OUTER_##OUTER_IP, \ + IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + IAVF_RX_PTYPE_##OUTER_FRAG, \ + IAVF_RX_PTYPE_TUNNEL_##T, \ + IAVF_RX_PTYPE_TUNNEL_END_##TE, \ + IAVF_RX_PTYPE_##TEF, \ + IAVF_RX_PTYPE_INNER_PROT_##I, \ + IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG +#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG +#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = { + /* L2 Packet types */ + IAVF_PTT_UNUSED_ENTRY(0), + IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT_UNUSED_ENTRY(4), + IAVF_PTT_UNUSED_ENTRY(5), + IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT_UNUSED_ENTRY(8), + IAVF_PTT_UNUSED_ENTRY(9), + IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(25), + IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(32), + IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(39), + IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + 
IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(47), + IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(54), + IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(62), + IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(69), + IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(77), + IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(84), + IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + IAVF_PTT_UNUSED_ENTRY(91), + IAVF_PTT(92, IP, IPV6, NOF, NONE, 
NONE, NOF, TCP, PAY4), + IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(98), + IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(105), + IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(113), + IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(120), + IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(128), + IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(135), + IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(143), + IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, 
NOF, TCP, PAY4), + IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + IAVF_PTT_UNUSED_ENTRY(150), + IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + IAVF_PTT_UNUSED_ENTRY(154), + IAVF_PTT_UNUSED_ENTRY(155), + IAVF_PTT_UNUSED_ENTRY(156), + IAVF_PTT_UNUSED_ENTRY(157), + IAVF_PTT_UNUSED_ENTRY(158), + IAVF_PTT_UNUSED_ENTRY(159), + + IAVF_PTT_UNUSED_ENTRY(160), + IAVF_PTT_UNUSED_ENTRY(161), + IAVF_PTT_UNUSED_ENTRY(162), + IAVF_PTT_UNUSED_ENTRY(163), + IAVF_PTT_UNUSED_ENTRY(164), + IAVF_PTT_UNUSED_ENTRY(165), + IAVF_PTT_UNUSED_ENTRY(166), + IAVF_PTT_UNUSED_ENTRY(167), + IAVF_PTT_UNUSED_ENTRY(168), + IAVF_PTT_UNUSED_ENTRY(169), + + IAVF_PTT_UNUSED_ENTRY(170), + IAVF_PTT_UNUSED_ENTRY(171), + IAVF_PTT_UNUSED_ENTRY(172), + IAVF_PTT_UNUSED_ENTRY(173), + IAVF_PTT_UNUSED_ENTRY(174), + IAVF_PTT_UNUSED_ENTRY(175), + IAVF_PTT_UNUSED_ENTRY(176), + IAVF_PTT_UNUSED_ENTRY(177), + IAVF_PTT_UNUSED_ENTRY(178), + IAVF_PTT_UNUSED_ENTRY(179), + + IAVF_PTT_UNUSED_ENTRY(180), + IAVF_PTT_UNUSED_ENTRY(181), + IAVF_PTT_UNUSED_ENTRY(182), + IAVF_PTT_UNUSED_ENTRY(183), + IAVF_PTT_UNUSED_ENTRY(184), + IAVF_PTT_UNUSED_ENTRY(185), + IAVF_PTT_UNUSED_ENTRY(186), + IAVF_PTT_UNUSED_ENTRY(187), + IAVF_PTT_UNUSED_ENTRY(188), + IAVF_PTT_UNUSED_ENTRY(189), + + IAVF_PTT_UNUSED_ENTRY(190), + IAVF_PTT_UNUSED_ENTRY(191), + IAVF_PTT_UNUSED_ENTRY(192), + IAVF_PTT_UNUSED_ENTRY(193), + IAVF_PTT_UNUSED_ENTRY(194), + IAVF_PTT_UNUSED_ENTRY(195), + IAVF_PTT_UNUSED_ENTRY(196), + IAVF_PTT_UNUSED_ENTRY(197), + IAVF_PTT_UNUSED_ENTRY(198), + IAVF_PTT_UNUSED_ENTRY(199), + + IAVF_PTT_UNUSED_ENTRY(200), + IAVF_PTT_UNUSED_ENTRY(201), + IAVF_PTT_UNUSED_ENTRY(202), + IAVF_PTT_UNUSED_ENTRY(203), + IAVF_PTT_UNUSED_ENTRY(204), + IAVF_PTT_UNUSED_ENTRY(205), + IAVF_PTT_UNUSED_ENTRY(206), + IAVF_PTT_UNUSED_ENTRY(207), + IAVF_PTT_UNUSED_ENTRY(208), + IAVF_PTT_UNUSED_ENTRY(209), + + IAVF_PTT_UNUSED_ENTRY(210), + IAVF_PTT_UNUSED_ENTRY(211), + IAVF_PTT_UNUSED_ENTRY(212), + IAVF_PTT_UNUSED_ENTRY(213), + IAVF_PTT_UNUSED_ENTRY(214), + IAVF_PTT_UNUSED_ENTRY(215), + IAVF_PTT_UNUSED_ENTRY(216), + IAVF_PTT_UNUSED_ENTRY(217), + IAVF_PTT_UNUSED_ENTRY(218), + IAVF_PTT_UNUSED_ENTRY(219), + + IAVF_PTT_UNUSED_ENTRY(220), + IAVF_PTT_UNUSED_ENTRY(221), + IAVF_PTT_UNUSED_ENTRY(222), + IAVF_PTT_UNUSED_ENTRY(223), + IAVF_PTT_UNUSED_ENTRY(224), + IAVF_PTT_UNUSED_ENTRY(225), + IAVF_PTT_UNUSED_ENTRY(226), + IAVF_PTT_UNUSED_ENTRY(227), + IAVF_PTT_UNUSED_ENTRY(228), + IAVF_PTT_UNUSED_ENTRY(229), + + IAVF_PTT_UNUSED_ENTRY(230), + IAVF_PTT_UNUSED_ENTRY(231), + IAVF_PTT_UNUSED_ENTRY(232), + IAVF_PTT_UNUSED_ENTRY(233), + IAVF_PTT_UNUSED_ENTRY(234), + IAVF_PTT_UNUSED_ENTRY(235), + IAVF_PTT_UNUSED_ENTRY(236), + IAVF_PTT_UNUSED_ENTRY(237), + IAVF_PTT_UNUSED_ENTRY(238), + IAVF_PTT_UNUSED_ENTRY(239), + + IAVF_PTT_UNUSED_ENTRY(240), + IAVF_PTT_UNUSED_ENTRY(241), + IAVF_PTT_UNUSED_ENTRY(242), + IAVF_PTT_UNUSED_ENTRY(243), + IAVF_PTT_UNUSED_ENTRY(244), + IAVF_PTT_UNUSED_ENTRY(245), + IAVF_PTT_UNUSED_ENTRY(246), + 
IAVF_PTT_UNUSED_ENTRY(247), + IAVF_PTT_UNUSED_ENTRY(248), + IAVF_PTT_UNUSED_ENTRY(249), + + IAVF_PTT_UNUSED_ENTRY(250), + IAVF_PTT_UNUSED_ENTRY(251), + IAVF_PTT_UNUSED_ENTRY(252), + IAVF_PTT_UNUSED_ENTRY(253), + IAVF_PTT_UNUSED_ENTRY(254), + IAVF_PTT_UNUSED_ENTRY(255) +}; + +/** + * iavf_aq_send_msg_to_pf + * @hw: pointer to the hardware structure + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * Send message to PF driver using admin queue. By default, this message + * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for + * completion before returning. + **/ +iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, + enum virtchnl_ops v_opcode, + iavf_status v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_asq_cmd_details details; + struct i40e_aq_desc desc; + iavf_status status; + + iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + if (msglen) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF + | I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(msglen); + } + if (!cmd_details) { + memset(&details, 0, sizeof(details)); + details.async = true; + cmd_details = &details; + } + status = iavf_asq_send_command(hw, &desc, msg, msglen, cmd_details); + return status; +} + +/** + * iavf_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +void iavf_vf_parse_hw_config(struct iavf_hw *hw, + struct virtchnl_vf_resource *msg) +{ + struct virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + hw->dev_caps.dcb = msg->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_L2; + hw->dev_caps.fcoe = 0; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { + ether_addr_copy(hw->mac.perm_addr, + vsi_res->default_mac_addr); + ether_addr_copy(hw->mac.addr, + vsi_res->default_mac_addr); + } + vsi_res++; + } +} + +/** + * iavf_vf_reset + * @hw: pointer to the hardware structure + * + * Send a VF_RESET message to the PF. Does not wait for response from PF + * as none will be forthcoming. Immediately after calling this function, + * the admin queue should be shut down and (optionally) reinitialized. + **/ +iavf_status iavf_vf_reset(struct iavf_hw *hw) +{ + return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, + 0, NULL, 0, NULL); +} diff --git a/drivers/net/ethernet/intel/iavf/iavf_devids.h b/drivers/net/ethernet/intel/iavf/iavf_devids.h new file mode 100644 index 000000000000..8eb7b697e96c --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_devids.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
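For orientation, a minimal usage sketch (not part of this patch) of the admin-queue helper above. The opcode, payload, and error mapping are illustrative assumptions; because the send defaults to asynchronous completion, the PF's reply arrives later on the admin receive queue rather than being returned here.

	/* Illustrative only: ask the PF for VF resources through the helper above. */
	static int example_request_resources(struct iavf_hw *hw)
	{
		/* capability bits the VF would like; the exact set is up to the caller */
		u32 caps = VIRTCHNL_VF_OFFLOAD_L2;
		iavf_status err;

		err = iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_GET_VF_RESOURCES, 0,
					     (u8 *)&caps, sizeof(caps), NULL);
		if (err)
			return -EIO;

		/* the PF's virtchnl_vf_resource reply is parsed later, e.g. by
		 * iavf_vf_parse_hw_config(), once it shows up on the receive queue
		 */
		return 0;
	}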
*/ + +#ifndef _IAVF_DEVIDS_H_ +#define _IAVF_DEVIDS_H_ + +/* Device IDs for the VF driver */ +#define IAVF_DEV_ID_VF 0x154C +#define IAVF_DEV_ID_VF_HV 0x1571 +#define IAVF_DEV_ID_ADAPTIVE_VF 0x1889 +#define IAVF_DEV_ID_X722_VF 0x37CD +#endif /* _IAVF_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c new file mode 100644 index 000000000000..9f87304109fe --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -0,0 +1,1036 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +/* ethtool support for iavf */ +#include "iavf.h" + +#include <linux/uaccess.h> + +/* ethtool statistics helpers */ + +/** + * struct iavf_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the IAVF_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the iavf_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the iavf_add_stat_string() helper function. + **/ +struct iavf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro to define an iavf_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. + */ +#define IAVF_STAT(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +/* Helper macro for defining some statistics related to queues */ +#define IAVF_QUEUE_STAT(_name, _stat) \ + IAVF_STAT(struct iavf_ring, _name, _stat) + +/* Stats associated with a Tx or Rx ring */ +static const struct iavf_stats iavf_gstrings_queue_stats[] = { + IAVF_QUEUE_STAT("%s-%u.packets", stats.packets), + IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes), +}; + +/** + * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pointer: basis for where to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. Used to implement iavf_add_ethtool_stats and + * iavf_add_queue_stats. If the pointer is null, data will be zero'd. + */ +static void +iavf_add_one_ethtool_stat(u64 *data, void *pointer, + const struct iavf_stats *stat) +{ + char *p; + + if (!pointer) { + /* ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. 
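A compact illustration (not from the patch) of the offset-based stat scheme described above: the macro only records where a counter lives inside its structure, and a reader recovers the value from any instance by adding that offset to the base pointer. The example struct and helper names are hypothetical.

	struct example_counters {
		u64 packets;
		u64 bytes;
	};

	/* same shape as IAVF_STAT(): display name + field size + field offset */
	static const struct iavf_stats example_stats[] = {
		{ "packets", FIELD_SIZEOF(struct example_counters, packets),
		  offsetof(struct example_counters, packets) },
		{ "bytes",   FIELD_SIZEOF(struct example_counters, bytes),
		  offsetof(struct example_counters, bytes) },
	};

	/* simplified reader: assumes a u64 field, whereas the real helper
	 * switches on sizeof_stat so u8/u16/u32 counters also work
	 */
	static u64 example_read_stat(const void *base, const struct iavf_stats *stat)
	{
		return *(const u64 *)((const char *)base + stat->stat_offset);
	}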
+ */ + *data = 0; + return; + } + + p = (char *)pointer + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location to copy stats from + * @stats: array of stats to copy + * @size: the size of the stats definition + * + * Copy the stats defined by the stats array using the pointer as a base into + * the data buffer supplied by ethtool. Updates the data pointer to point to + * the next empty location for successive calls to __iavf_add_ethtool_stats. + * If pointer is null, set the data values to zero and update the pointer to + * skip these stats. + **/ +static void +__iavf_add_ethtool_stats(u64 **data, void *pointer, + const struct iavf_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]); +} + +/** + * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location where stats are stored + * @stats: static const array of stat definitions + * + * Macro to ease the use of __iavf_add_ethtool_stats by taking a static + * constant stats array and passing the ARRAY_SIZE(). This avoids typos by + * ensuring that we pass the size associated with the given stats array. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. + **/ +#define iavf_add_ethtool_stats(data, pointer, stats) \ + __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + +/** + * iavf_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @ring: the ring to copy + * + * Queue statistics must be copied while protected by + * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats. + * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the + * ring pointer is null, zero out the queue stat values and update the data + * pointer. Otherwise safely copy the stats from the ring into the supplied + * buffer and update the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + **/ +static void +iavf_add_queue_stats(u64 **data, struct iavf_ring *ring) +{ + const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats); + const struct iavf_stats *stats = iavf_gstrings_queue_stats; + unsigned int start; + unsigned int i; + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry_irq. But first, make sure our ring is + * non-null before attempting to access its syncp. + */ + do { + start = !ring ? 
0 : u64_stats_fetch_begin_irq(&ring->syncp); + for (i = 0; i < size; i++) + iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); + } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + +/** + * __iavf_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + **/ +static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +/** + * iavf_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. + **/ +#define iavf_add_stat_strings(p, stats, ...) \ + __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) + +#define VF_STAT(_name, _stat) \ + IAVF_STAT(struct iavf_adapter, _name, _stat) + +static const struct iavf_stats iavf_gstrings_stats[] = { + VF_STAT("rx_bytes", current_stats.rx_bytes), + VF_STAT("rx_unicast", current_stats.rx_unicast), + VF_STAT("rx_multicast", current_stats.rx_multicast), + VF_STAT("rx_broadcast", current_stats.rx_broadcast), + VF_STAT("rx_discards", current_stats.rx_discards), + VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), + VF_STAT("tx_bytes", current_stats.tx_bytes), + VF_STAT("tx_unicast", current_stats.tx_unicast), + VF_STAT("tx_multicast", current_stats.tx_multicast), + VF_STAT("tx_broadcast", current_stats.tx_broadcast), + VF_STAT("tx_discards", current_stats.tx_discards), + VF_STAT("tx_errors", current_stats.tx_errors), +}; + +#define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats) + +#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats) + +/* For now we have one and only one private flag and it is only defined + * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead + * of leaving all this code sitting around empty we will strip it unless + * our one private flag is actually available. + */ +struct iavf_priv_flags { + char flag_string[ETH_GSTRING_LEN]; + u32 flag; + bool read_only; +}; + +#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \ + .flag_string = _name, \ + .flag = _flag, \ + .read_only = _read_only, \ +} + +static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = { + IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0), +}; + +#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags) + +/** + * iavf_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @cmd: ethtool command + * + * Reports speed/duplex settings. Because this is a VF, we don't know what + * kind of link we really have, so we fake it. 
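A hedged sketch of the u64_stats retry pattern used by iavf_add_queue_stats() above, reduced to a single ring: the reader keeps re-copying until the sequence counter confirms no writer interfered mid-copy. The helper name is hypothetical; the field names come from the queue stats defined earlier.

	static void example_read_ring_counters(struct iavf_ring *ring,
					       u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			*packets = ring->stats.packets;
			*bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}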
+ **/ +static int iavf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = PORT_NONE; + /* Set speed and duplex */ + switch (adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + cmd->base.speed = SPEED_40000; + break; + case I40E_LINK_SPEED_25GB: +#ifdef SPEED_25000 + cmd->base.speed = SPEED_25000; +#else + netdev_info(netdev, + "Speed is 25G, display not supported by this version of ethtool.\n"); +#endif + break; + case I40E_LINK_SPEED_20GB: + cmd->base.speed = SPEED_20000; + break; + case I40E_LINK_SPEED_10GB: + cmd->base.speed = SPEED_10000; + break; + case I40E_LINK_SPEED_1GB: + cmd->base.speed = SPEED_1000; + break; + case I40E_LINK_SPEED_100MB: + cmd->base.speed = SPEED_100; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + + return 0; +} + +/** + * iavf_get_sset_count - Get length of string set + * @netdev: network interface device structure + * @sset: id of string set + * + * Reports size of various string tables. + **/ +static int iavf_get_sset_count(struct net_device *netdev, int sset) +{ + if (sset == ETH_SS_STATS) + return IAVF_STATS_LEN + + (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES); + else if (sset == ETH_SS_PRIV_FLAGS) + return IAVF_PRIV_FLAGS_STR_LEN; + else + return -EINVAL; +} + +/** + * iavf_get_ethtool_stats - report device statistics + * @netdev: network interface device structure + * @stats: ethtool statistics structure + * @data: pointer to data buffer + * + * All statistics are added to the data buffer as an array of u64. + **/ +static void iavf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + unsigned int i; + + iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); + + rcu_read_lock(); + for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) { + struct iavf_ring *ring; + + /* Avoid accessing un-allocated queues */ + ring = (i < adapter->num_active_queues ? + &adapter->tx_rings[i] : NULL); + iavf_add_queue_stats(&data, ring); + + /* Avoid accessing un-allocated queues */ + ring = (i < adapter->num_active_queues ? + &adapter->rx_rings[i] : NULL); + iavf_add_queue_stats(&data, ring); + } + rcu_read_unlock(); +} + +/** + * iavf_get_priv_flag_strings - Get private flag strings + * @netdev: network interface device structure + * @data: buffer for string data + * + * Builds the private flags string table + **/ +static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + + for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { + snprintf(data, ETH_GSTRING_LEN, "%s", + iavf_gstrings_priv_flags[i].flag_string); + data += ETH_GSTRING_LEN; + } +} + +/** + * iavf_get_stat_strings - Get stat strings + * @netdev: network interface device structure + * @data: buffer for string data + * + * Builds the statistics string table + **/ +static void iavf_get_stat_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + + iavf_add_stat_strings(&data, iavf_gstrings_stats); + + /* Queues are always allocated in pairs, so we just use num_tx_queues + * for both Tx and Rx queues. 
+ */ + for (i = 0; i < netdev->num_tx_queues; i++) { + iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, + "tx", i); + iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, + "rx", i); + } +} + +/** + * iavf_get_strings - Get string set + * @netdev: network interface device structure + * @sset: id of string set + * @data: buffer for string data + * + * Builds string tables for various string sets + **/ +static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + switch (sset) { + case ETH_SS_STATS: + iavf_get_stat_strings(netdev, data); + break; + case ETH_SS_PRIV_FLAGS: + iavf_get_priv_flag_strings(netdev, data); + break; + default: + break; + } +} + +/** + * iavf_get_priv_flags - report device private flags + * @netdev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags + * array. + * + * Returns a u32 bitmap of flags. + **/ +static u32 iavf_get_priv_flags(struct net_device *netdev) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + u32 i, ret_flags = 0; + + for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { + const struct iavf_priv_flags *priv_flags; + + priv_flags = &iavf_gstrings_priv_flags[i]; + + if (priv_flags->flag & adapter->flags) + ret_flags |= BIT(i); + } + + return ret_flags; +} + +/** + * iavf_set_priv_flags - set private flags + * @netdev: network interface device structure + * @flags: bit flags to be set + **/ +static int iavf_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + u32 orig_flags, new_flags, changed_flags; + u32 i; + + orig_flags = READ_ONCE(adapter->flags); + new_flags = orig_flags; + + for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { + const struct iavf_priv_flags *priv_flags; + + priv_flags = &iavf_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + new_flags |= priv_flags->flag; + else + new_flags &= ~(priv_flags->flag); + + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; + } + + /* Before we finalize any flag changes, any checks which we need to + * perform to determine if the new flags will be supported should go + * here... + */ + + /* Compare and exchange the new flags into place. If we failed, that + * is if cmpxchg returns anything but the old value, this means + * something else must have modified the flags variable since we + * copied it. We'll just punt with an error and log something in the + * message buffer. + */ + if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) { + dev_warn(&adapter->pdev->dev, + "Unable to update adapter->flags as it was modified by another thread...\n"); + return -EAGAIN; + } + + changed_flags = orig_flags ^ new_flags; + + /* Process any additional changes needed as a result of flag changes. + * The changed_flags value reflects the list of bits that were changed + * in the code above. + */ + + /* issue a reset to force legacy-rx change to take effect */ + if (changed_flags & IAVF_FLAG_LEGACY_RX) { + if (netif_running(netdev)) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } + } + + return 0; +} + +/** + * iavf_get_msglevel - Get debug message level + * @netdev: network interface device structure + * + * Returns current debug message level. 
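A minimal sketch of the lock-free flag update that iavf_set_priv_flags() documents above: snapshot the flags word, compute the new value, and publish it only if nothing else modified the word in between. The helper name is hypothetical.

	static int example_toggle_flag(struct iavf_adapter *adapter, u32 flag, bool on)
	{
		u32 orig_flags = READ_ONCE(adapter->flags);
		u32 new_flags = on ? (orig_flags | flag) : (orig_flags & ~flag);

		/* lost a race with another writer: report -EAGAIN like the code above */
		if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags)
			return -EAGAIN;
		return 0;
	}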
+ **/ +static u32 iavf_get_msglevel(struct net_device *netdev) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +/** + * iavf_set_msglevel - Set debug message level + * @netdev: network interface device structure + * @data: message level + * + * Set current debug message level. Higher values cause the driver to + * be noisier. + **/ +static void iavf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + if (IAVF_DEBUG_USER & data) + adapter->hw.debug_mask = data; + adapter->msg_enable = data; +} + +/** + * iavf_get_drvinfo - Get driver info + * @netdev: network interface device structure + * @drvinfo: ethool driver info structure + * + * Returns information about the driver and device for display to the user. + **/ +static void iavf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, iavf_driver_name, 32); + strlcpy(drvinfo->version, iavf_driver_version, 32); + strlcpy(drvinfo->fw_version, "N/A", 4); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN; +} + +/** + * iavf_get_ringparam - Get ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * + * Returns current ring parameters. TX and RX rings are reported separately, + * but the number of rings is not reported. + **/ +static void iavf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IAVF_MAX_RXD; + ring->tx_max_pending = IAVF_MAX_TXD; + ring->rx_pending = adapter->rx_desc_count; + ring->tx_pending = adapter->tx_desc_count; +} + +/** + * iavf_set_ringparam - Set ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * + * Sets ring parameters. TX and RX rings are controlled separately, but the + * number of rings is not specified, so all rings get the same settings. + **/ +static int iavf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + IAVF_MIN_TXD, + IAVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + IAVF_MIN_RXD, + IAVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == adapter->tx_desc_count) && + (new_rx_count == adapter->rx_desc_count)) + return 0; + + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + + if (netif_running(netdev)) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } + + return 0; +} + +/** + * __iavf_get_coalesce - get per-queue coalesce settings + * @netdev: the netdev to check + * @ec: ethtool coalesce data structure + * @queue: which queue to pick + * + * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs + * are per queue. If queue is <0 then we default to queue 0 as the + * representative value. 
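A worked example of the clamp-and-align step in iavf_set_ringparam() above. The real bounds and multiple come from IAVF_MIN/MAX_TXD and IAVF_REQ_DESCRIPTOR_MULTIPLE in iavf.h, which this hunk does not show, so the 64/4096/32 used here are only illustrative.

	static u32 example_fixup_desc_count(u32 requested)
	{
		/* illustrative bounds; the driver uses IAVF_MIN_TXD/IAVF_MAX_TXD */
		u32 count = clamp_t(u32, requested, 64, 4096);

		/* round up to the descriptor multiple, e.g. 1000 -> 1024 for a multiple of 32 */
		return ALIGN(count, 32);
	}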
+ **/ +static int __iavf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, int queue) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_vsi *vsi = &adapter->vsi; + struct iavf_ring *rx_ring, *tx_ring; + + ec->tx_max_coalesced_frames = vsi->work_limit; + ec->rx_max_coalesced_frames = vsi->work_limit; + + /* Rx and Tx usecs per queue value. If user doesn't specify the + * queue, return queue 0's value to represent. + */ + if (queue < 0) + queue = 0; + else if (queue >= adapter->num_active_queues) + return -EINVAL; + + rx_ring = &adapter->rx_rings[queue]; + tx_ring = &adapter->tx_rings[queue]; + + if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) + ec->use_adaptive_rx_coalesce = 1; + + if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) + ec->use_adaptive_tx_coalesce = 1; + + ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + return 0; +} + +/** + * iavf_get_coalesce - Get interrupt coalescing settings + * @netdev: network interface device structure + * @ec: ethtool coalesce structure + * + * Returns current coalescing settings. This is referred to elsewhere in the + * driver as Interrupt Throttle Rate, as this is how the hardware describes + * this functionality. Note that if per-queue settings have been modified this + * only represents the settings of queue 0. + **/ +static int iavf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + return __iavf_get_coalesce(netdev, ec, -1); +} + +/** + * iavf_get_per_queue_coalesce - get coalesce values for specific queue + * @netdev: netdev to read + * @ec: coalesce settings from ethtool + * @queue: the queue to read + * + * Read specific queue's coalesce settings. + **/ +static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __iavf_get_coalesce(netdev, ec, queue); +} + +/** + * iavf_set_itr_per_queue - set ITR values for specific queue + * @adapter: the VF adapter struct to set values for + * @ec: coalesce settings from ethtool + * @queue: the queue to modify + * + * Change the ITR settings for a specific queue. + **/ +static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, + struct ethtool_coalesce *ec, int queue) +{ + struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; + struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; + struct iavf_q_vector *q_vector; + + rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); + tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); + + rx_ring->itr_setting |= IAVF_ITR_DYNAMIC; + if (!ec->use_adaptive_rx_coalesce) + rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; + + tx_ring->itr_setting |= IAVF_ITR_DYNAMIC; + if (!ec->use_adaptive_tx_coalesce) + tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; + + q_vector = rx_ring->q_vector; + q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); + + q_vector = tx_ring->q_vector; + q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); + + /* The interrupt handler itself will take care of programming + * the Tx and Rx ITR values based on the values we have entered + * into the q_vector, no need to write the values now. + */ +} + +/** + * __iavf_set_coalesce - set coalesce settings for particular queue + * @netdev: the netdev to change + * @ec: ethtool coalesce settings + * @queue: the queue to change + * + * Sets the coalesce settings for a particular queue. 
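A short sketch of how the per-queue ITR setting above is packed: the register-aligned usecs interval and the adaptive flag share one value, and __iavf_get_coalesce() recovers the usecs by masking the flag back off. The helper names and the u16 width are assumptions.

	static u16 example_encode_itr(u16 usecs, bool adaptive)
	{
		u16 itr = ITR_REG_ALIGN(usecs);	/* align to what the HW register accepts */

		if (adaptive)
			itr |= IAVF_ITR_DYNAMIC;	/* mark the queue as adaptive */
		return itr;
	}

	static u16 example_decode_itr_usecs(u16 itr)
	{
		return itr & ~IAVF_ITR_DYNAMIC;	/* same masking as __iavf_get_coalesce() */
	}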
+ **/ +static int __iavf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, int queue) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_vsi *vsi = &adapter->vsi; + int i; + + if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) + vsi->work_limit = ec->tx_max_coalesced_frames_irq; + + if (ec->rx_coalesce_usecs == 0) { + if (ec->use_adaptive_rx_coalesce) + netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); + } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) || + (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) { + netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); + return -EINVAL; + } else if (ec->tx_coalesce_usecs == 0) { + if (ec->use_adaptive_tx_coalesce) + netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); + } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) || + (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) { + netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); + return -EINVAL; + } + + /* Rx and Tx usecs has per queue value. If user doesn't specify the + * queue, apply to all queues. + */ + if (queue < 0) { + for (i = 0; i < adapter->num_active_queues; i++) + iavf_set_itr_per_queue(adapter, ec, i); + } else if (queue < adapter->num_active_queues) { + iavf_set_itr_per_queue(adapter, ec, queue); + } else { + netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", + adapter->num_active_queues - 1); + return -EINVAL; + } + + return 0; +} + +/** + * iavf_set_coalesce - Set interrupt coalescing settings + * @netdev: network interface device structure + * @ec: ethtool coalesce structure + * + * Change current coalescing settings for every queue. + **/ +static int iavf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + return __iavf_set_coalesce(netdev, ec, -1); +} + +/** + * iavf_set_per_queue_coalesce - set specific queue's coalesce settings + * @netdev: the netdev to change + * @ec: ethtool's coalesce settings + * @queue: the queue to modify + * + * Modifies a specific queue's coalesce settings. + */ +static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __iavf_set_coalesce(netdev, ec, queue); +} + +/** + * iavf_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule locations + * + * Returns Success if the command is supported. + **/ +static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_active_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + netdev_info(netdev, + "RSS hash info is not available to vf, use pf.\n"); + break; + default: + break; + } + + return ret; +} +/** + * iavf_get_channels: get the number of channels supported by the device + * @netdev: network interface device structure + * @ch: channel information structure + * + * For the purposes of our device, we only use combined channels, i.e. a tx/rx + * queue pair. Report one extra channel to match our "other" MSI-X vector. 
+ **/ +static void iavf_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = IAVF_MAX_REQ_QUEUES; + + ch->max_other = NONQ_VECS; + ch->other_count = NONQ_VECS; + + ch->combined_count = adapter->num_active_queues; +} + +/** + * iavf_set_channels: set the new channel count + * @netdev: network interface device structure + * @ch: channel information structure + * + * Negotiate a new number of channels with the PF then do a reset. During + * reset we'll realloc queues and fix the RSS table. Returns 0 on success, + * negative on failure. + **/ +static int iavf_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + int num_req = ch->combined_count; + + if (num_req != adapter->num_active_queues && + !(adapter->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { + dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n"); + return -EINVAL; + } + + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) { + dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); + return -EINVAL; + } + + /* All of these should have already been checked by ethtool before this + * even gets to us, but just to be sure. + */ + if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES) + return -EINVAL; + + if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) + return -EINVAL; + + adapter->num_req_queues = num_req; + return iavf_request_queues(adapter, num_req); +} + +/** + * iavf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 iavf_get_rxfh_key_size(struct net_device *netdev) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_key_size; +} + +/** + * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 iavf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_lut_size; +} + +/** + * iavf_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function in use + * + * Reads the indirection table directly from the hardware. Always returns 0. + **/ +static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + u16 i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + + memcpy(key, adapter->rss_key, adapter->rss_key_size); + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; + + return 0; +} + +/** + * iavf_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function to use + * + * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * returns 0 after programming the table. 
+ **/ +static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + u16 i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + if (key) + memcpy(adapter->rss_key, key, adapter->rss_key_size); + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); + + return iavf_config_rss(adapter); +} + +static const struct ethtool_ops iavf_ethtool_ops = { + .get_drvinfo = iavf_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = iavf_get_ringparam, + .set_ringparam = iavf_set_ringparam, + .get_strings = iavf_get_strings, + .get_ethtool_stats = iavf_get_ethtool_stats, + .get_sset_count = iavf_get_sset_count, + .get_priv_flags = iavf_get_priv_flags, + .set_priv_flags = iavf_set_priv_flags, + .get_msglevel = iavf_get_msglevel, + .set_msglevel = iavf_set_msglevel, + .get_coalesce = iavf_get_coalesce, + .set_coalesce = iavf_set_coalesce, + .get_per_queue_coalesce = iavf_get_per_queue_coalesce, + .set_per_queue_coalesce = iavf_set_per_queue_coalesce, + .get_rxnfc = iavf_get_rxnfc, + .get_rxfh_indir_size = iavf_get_rxfh_indir_size, + .get_rxfh = iavf_get_rxfh, + .set_rxfh = iavf_set_rxfh, + .get_channels = iavf_get_channels, + .set_channels = iavf_set_channels, + .get_rxfh_key_size = iavf_get_rxfh_key_size, + .get_link_ksettings = iavf_get_link_ksettings, +}; + +/** + * iavf_set_ethtool_ops - Initialize ethtool ops struct + * @netdev: network interface device structure + * + * Sets ethtool ops struct in our netdev so that ethtool can call + * our functions. + **/ +void iavf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &iavf_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index fef6d892ed4c..9f2b7b7adf6b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1,38 +1,38 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
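For context, a hedged sketch (not in the patch) of how an RSS indirection table like adapter->rss_lut is typically seeded before a user overrides it via iavf_set_rxfh(): an even round-robin spread of the active queues, which is also what the core helper ethtool_rxfh_indir_default() computes.

	static void example_default_rss_lut(u8 *lut, u16 lut_size, u16 num_queues)
	{
		u16 i;

		for (i = 0; i < lut_size; i++)
			lut[i] = i % num_queues;	/* queue serving hash bucket i */
	}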
*/ -#include "i40evf.h" -#include "i40e_prototype.h" -#include "i40evf_client.h" -/* All i40evf tracepoints are defined by the include below, which must +#include "iavf.h" +#include "iavf_prototype.h" +#include "iavf_client.h" +/* All iavf tracepoints are defined by the include below, which must * be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined */ #define CREATE_TRACE_POINTS -#include "i40e_trace.h" +#include "iavf_trace.h" -static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); -static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); -static int i40evf_close(struct net_device *netdev); +static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); +static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); +static int iavf_close(struct net_device *netdev); -char i40evf_driver_name[] = "i40evf"; -static const char i40evf_driver_string[] = - "Intel(R) 40-10 Gigabit Virtual Function Network Driver"; +char iavf_driver_name[] = "iavf"; +static const char iavf_driver_string[] = + "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 3 #define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 2 +#define DRV_VERSION_BUILD 3 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ DRV_KERN -const char i40evf_driver_version[] = DRV_VERSION; -static const char i40evf_copyright[] = - "Copyright (c) 2013 - 2015 Intel Corporation."; +const char iavf_driver_version[] = DRV_VERSION; +static const char iavf_copyright[] = + "Copyright (c) 2013 - 2018 Intel Corporation."; -/* i40evf_pci_tbl - PCI Device ID Table +/* iavf_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s @@ -40,36 +40,37 @@ static const char i40evf_copyright[] = * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static const struct pci_device_id i40evf_pci_tbl[] = { - {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0}, +static const struct pci_device_id iavf_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0}, + {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0}, + {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0}, + {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0}, /* required last entry */ {0, } }; -MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl); +MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); +MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); -MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); -static struct workqueue_struct *i40evf_wq; +static struct workqueue_struct *iavf_wq; /** - * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code + * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to **/ -i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw, - struct i40e_dma_mem *mem, - u64 size, u32 alignment) +iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, + struct iavf_dma_mem *mem, + u64 size, u32 
alignment) { - struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; if (!mem) return I40E_ERR_PARAM; @@ -84,13 +85,13 @@ i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw, } /** - * i40evf_free_dma_mem_d - OS specific memory free for shared code + * iavf_free_dma_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem) { - struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; if (!mem || !mem->va) return I40E_ERR_PARAM; @@ -100,13 +101,13 @@ i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) } /** - * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code + * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ -i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw, - struct i40e_virt_mem *mem, u32 size) +iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, + struct iavf_virt_mem *mem, u32 size) { if (!mem) return I40E_ERR_PARAM; @@ -121,12 +122,11 @@ i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw, } /** - * i40evf_free_virt_mem_d - OS specific memory free for shared code + * iavf_free_virt_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw, - struct i40e_virt_mem *mem) +iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem) { if (!mem) return I40E_ERR_PARAM; @@ -138,17 +138,17 @@ i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw, } /** - * i40evf_debug_d - OS dependent version of debug printing + * iavf_debug_d - OS dependent version of debug printing * @hw: pointer to the HW structure * @mask: debug level mask * @fmt_str: printf-type format description **/ -void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) +void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) { char buf[512]; va_list argptr; - if (!(mask & ((struct i40e_hw *)hw)->debug_mask)) + if (!(mask & ((struct iavf_hw *)hw)->debug_mask)) return; va_start(argptr, fmt_str); @@ -160,134 +160,131 @@ void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) 
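The bodies of these *_mem_d shims are mostly unchanged context in this hunk, so as a hedged sketch only: such an OS wrapper for the shared code conventionally backs the mem struct with a coherent DMA buffer from the PCI device. The va/pa/size field names follow the i40e convention and the alignment handling here is an assumption, not the patch's exact code.

	static iavf_status example_alloc_dma(struct iavf_adapter *adapter,
					     struct iavf_dma_mem *mem,
					     u64 size, u32 align)
	{
		mem->size = ALIGN(size, align);
		mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
					     &mem->pa, GFP_KERNEL);
		return mem->va ? 0 : I40E_ERR_NO_MEMORY;
	}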
} /** - * i40evf_schedule_reset - Set the flags and schedule a reset event + * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure **/ -void i40evf_schedule_reset(struct i40evf_adapter *adapter) +void iavf_schedule_reset(struct iavf_adapter *adapter) { if (!(adapter->flags & - (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; schedule_work(&adapter->reset_task); } } /** - * i40evf_tx_timeout - Respond to a Tx Hang + * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ -static void i40evf_tx_timeout(struct net_device *netdev) +static void iavf_tx_timeout(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - i40evf_schedule_reset(adapter); + iavf_schedule_reset(adapter); } /** - * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC + * iavf_misc_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ -static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) +static void iavf_misc_irq_disable(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; if (!adapter->msix_entries) return; - wr32(hw, I40E_VFINT_DYN_CTL01, 0); + wr32(hw, IAVF_VFINT_DYN_CTL01, 0); - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); synchronize_irq(adapter->msix_entries[0].vector); } /** - * i40evf_misc_irq_enable - Enable default interrupt generation settings + * iavf_misc_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ -static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) +static void iavf_misc_irq_enable(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; - wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | - I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); - wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); + wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | + IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); + wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); } /** - * i40evf_irq_disable - Mask off interrupt generation on the NIC + * iavf_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ -static void i40evf_irq_disable(struct i40evf_adapter *adapter) +static void iavf_irq_disable(struct iavf_adapter *adapter) { int i; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; if (!adapter->msix_entries) return; for (i = 1; i < adapter->num_msix_vectors; i++) { - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); + wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); synchronize_irq(adapter->msix_entries[i].vector); } - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); } /** - * i40evf_irq_enable_queues - Enable interrupt for specified queues + * iavf_irq_enable_queues - Enable interrupt for specified queues * @adapter: board private structure * @mask: bitmap of queues to enable **/ -void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) +void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) { - struct i40e_hw *hw = &adapter->hw; + 
struct iavf_hw *hw = &adapter->hw; int i; for (i = 1; i < adapter->num_msix_vectors; i++) { if (mask & BIT(i - 1)) { - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), - I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); + wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), + IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); } } } /** - * i40evf_irq_enable - Enable default interrupt generation settings + * iavf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure * @flush: boolean value whether to run rd32() **/ -void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) +void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; - i40evf_misc_irq_enable(adapter); - i40evf_irq_enable_queues(adapter, ~0); + iavf_misc_irq_enable(adapter); + iavf_irq_enable_queues(adapter, ~0); if (flush) - rd32(hw, I40E_VFGEN_RSTAT); + iavf_flush(hw); } /** - * i40evf_msix_aq - Interrupt handler for vector 0 + * iavf_msix_aq - Interrupt handler for vector 0 * @irq: interrupt number * @data: pointer to netdev **/ -static irqreturn_t i40evf_msix_aq(int irq, void *data) +static irqreturn_t iavf_msix_aq(int irq, void *data) { struct net_device *netdev = data; - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_hw *hw = &adapter->hw; /* handle non-queue interrupts, these reads clear the registers */ - rd32(hw, I40E_VFINT_ICR01); - rd32(hw, I40E_VFINT_ICR0_ENA1); + rd32(hw, IAVF_VFINT_ICR01); + rd32(hw, IAVF_VFINT_ICR0_ENA1); /* schedule work on the private workqueue */ schedule_work(&adapter->adminq_task); @@ -296,13 +293,13 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) } /** - * i40evf_msix_clean_rings - MSIX mode Interrupt Handler + * iavf_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector **/ -static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) +static irqreturn_t iavf_msix_clean_rings(int irq, void *data) { - struct i40e_q_vector *q_vector = data; + struct iavf_q_vector *q_vector = data; if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; @@ -313,17 +310,17 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) } /** - * i40evf_map_vector_to_rxq - associate irqs with rx queues + * iavf_map_vector_to_rxq - associate irqs with rx queues * @adapter: board private structure * @v_idx: interrupt number * @r_idx: queue number **/ static void -i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) +iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; - struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx]; - struct i40e_hw *hw = &adapter->hw; + struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; + struct iavf_hw *hw = &adapter->hw; rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; @@ -333,23 +330,23 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector->ring_mask |= BIT(r_idx); - wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx), + wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), q_vector->rx.current_itr); 
q_vector->rx.current_itr = q_vector->rx.target_itr; } /** - * i40evf_map_vector_to_txq - associate irqs with tx queues + * iavf_map_vector_to_txq - associate irqs with tx queues * @adapter: board private structure * @v_idx: interrupt number * @t_idx: queue number **/ static void -i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) +iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; - struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx]; - struct i40e_hw *hw = &adapter->hw; + struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; + struct iavf_hw *hw = &adapter->hw; tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; @@ -359,13 +356,13 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); q_vector->num_ringpairs++; - wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx), + wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), q_vector->tx.target_itr); q_vector->tx.current_itr = q_vector->tx.target_itr; } /** - * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors + * iavf_map_rings_to_vectors - Maps descriptor rings to vectors * @adapter: board private structure to initialize * * This function maps descriptor rings to the queue-specific vectors @@ -374,7 +371,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) * group the rings as "efficiently" as possible. You would add new * mapping configurations in here. **/ -static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) +static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) { int rings_remaining = adapter->num_active_queues; int ridx = 0, vidx = 0; @@ -383,8 +380,8 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (; ridx < rings_remaining; ridx++) { - i40evf_map_vector_to_rxq(adapter, vidx, ridx); - i40evf_map_vector_to_txq(adapter, vidx, ridx); + iavf_map_vector_to_rxq(adapter, vidx, ridx); + iavf_map_vector_to_txq(adapter, vidx, ridx); /* In the case where we have more queues than vectors, continue * round-robin on vectors until all queues are mapped. @@ -393,38 +390,38 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) vidx = 0; } - adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; } /** - * i40evf_irq_affinity_notify - Callback for affinity changes + * iavf_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed * @mask: the new affinity mask * * This is a callback function used by the irq_set_affinity_notifier function * so that we may register to receive changes to the irq affinity masks. 
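A stripped-down sketch of the round-robin mapping that iavf_map_rings_to_vectors() describes above: when there are more ring pairs than vectors, the vector index simply wraps, so each vector ends up serving roughly the same number of rings. The helper and the pr_info() are illustrative only.

	static void example_show_ring_mapping(int num_rings, int q_vectors)
	{
		int ridx, vidx = 0;

		for (ridx = 0; ridx < num_rings; ridx++) {
			pr_info("ring pair %d -> MSI-X vector %d\n", ridx, vidx);
			if (++vidx >= q_vectors)
				vidx = 0;	/* wrap and keep round-robining */
		}
	}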
**/ -static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) +static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) { - struct i40e_q_vector *q_vector = - container_of(notify, struct i40e_q_vector, affinity_notify); + struct iavf_q_vector *q_vector = + container_of(notify, struct iavf_q_vector, affinity_notify); cpumask_copy(&q_vector->affinity_mask, mask); } /** - * i40evf_irq_affinity_release - Callback for affinity notifier release + * iavf_irq_affinity_release - Callback for affinity notifier release * @ref: internal core kernel usage * * This is a callback function used by the irq_set_affinity_notifier function * to inform the current notification subscriber that they will no longer * receive notifications. **/ -static void i40evf_irq_affinity_release(struct kref *ref) {} +static void iavf_irq_affinity_release(struct kref *ref) {} /** - * i40evf_request_traffic_irqs - Initialize MSI-X interrupts + * iavf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure * @basename: device basename * @@ -432,37 +429,38 @@ static void i40evf_irq_affinity_release(struct kref *ref) {} * interrupts from the kernel. **/ static int -i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) +iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) { unsigned int vector, q_vectors; unsigned int rx_int_idx = 0, tx_int_idx = 0; int irq_num, err; int cpu; - i40evf_irq_disable(adapter); + iavf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (vector = 0; vector < q_vectors; vector++) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; + struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; + irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-TxRx-%d", basename, rx_int_idx++); + "iavf-%s-TxRx-%d", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-rx-%d", basename, rx_int_idx++); + "iavf-%s-rx-%d", basename, rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-tx-%d", basename, tx_int_idx++); + "iavf-%s-tx-%d", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; } err = request_irq(irq_num, - i40evf_msix_clean_rings, + iavf_msix_clean_rings, 0, q_vector->name, q_vector); @@ -472,9 +470,9 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) goto free_queue_irqs; } /* register for affinity change notifications */ - q_vector->affinity_notify.notify = i40evf_irq_affinity_notify; + q_vector->affinity_notify.notify = iavf_irq_affinity_notify; q_vector->affinity_notify.release = - i40evf_irq_affinity_release; + iavf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); /* Spread the IRQ affinity hints across online CPUs. Note that * get_cpu_mask returns a mask with a permanent lifetime so @@ -498,23 +496,23 @@ free_queue_irqs: } /** - * i40evf_request_misc_irq - Initialize MSI-X interrupts + * iavf_request_misc_irq - Initialize MSI-X interrupts * @adapter: board private structure * * Allocates MSI-X vector 0 and requests interrupts from the kernel. 
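A hedged sketch of the affinity-hint spreading mentioned in the comment above: walk the online CPUs and give each traffic vector a single-CPU hint mask. get_cpu_mask() returns a mask with a permanent lifetime, so passing it straight to irq_set_affinity_hint() is safe. The helper name is hypothetical and this is not the exact loop the driver uses.

	static void example_spread_irq_hints(struct iavf_adapter *adapter,
					     unsigned int q_vectors)
	{
		int cpu = cpumask_first(cpu_online_mask);
		unsigned int vector;

		for (vector = 0; vector < q_vectors; vector++) {
			int irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

			irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
			cpu = cpumask_next(cpu, cpu_online_mask);
			if (cpu >= nr_cpu_ids)
				cpu = cpumask_first(cpu_online_mask);
		}
	}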
This * vector is only for the admin queue, and stays active even when the netdev * is closed. **/ -static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) +static int iavf_request_misc_irq(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; snprintf(adapter->misc_vector_name, - sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", dev_name(&adapter->pdev->dev)); err = request_irq(adapter->msix_entries[0].vector, - &i40evf_msix_aq, 0, + &iavf_msix_aq, 0, adapter->misc_vector_name, netdev); if (err) { dev_err(&adapter->pdev->dev, @@ -526,12 +524,12 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) } /** - * i40evf_free_traffic_irqs - Free MSI-X interrupts + * iavf_free_traffic_irqs - Free MSI-X interrupts * @adapter: board private structure * * Frees all MSI-X vectors other than 0. **/ -static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) +static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) { int vector, irq_num, q_vectors; @@ -549,12 +547,12 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) } /** - * i40evf_free_misc_irq - Free MSI-X miscellaneous vector + * iavf_free_misc_irq - Free MSI-X miscellaneous vector * @adapter: board private structure * * Frees MSI-X vector 0. **/ -static void i40evf_free_misc_irq(struct i40evf_adapter *adapter) +static void iavf_free_misc_irq(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -565,58 +563,58 @@ static void i40evf_free_misc_irq(struct i40evf_adapter *adapter) } /** - * i40evf_configure_tx - Configure Transmit Unit after Reset + * iavf_configure_tx - Configure Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. **/ -static void i40evf_configure_tx(struct i40evf_adapter *adapter) +static void iavf_configure_tx(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int i; for (i = 0; i < adapter->num_active_queues; i++) - adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i); + adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); } /** - * i40evf_configure_rx - Configure Receive Unit after Reset + * iavf_configure_rx - Configure Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. **/ -static void i40evf_configure_rx(struct i40evf_adapter *adapter) +static void iavf_configure_rx(struct iavf_adapter *adapter) { - unsigned int rx_buf_len = I40E_RXBUFFER_2048; - struct i40e_hw *hw = &adapter->hw; + unsigned int rx_buf_len = IAVF_RXBUFFER_2048; + struct iavf_hw *hw = &adapter->hw; int i; /* Legacy Rx will always default to a 2048 buffer size. */ #if (PAGE_SIZE < 8192) - if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) { + if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { struct net_device *netdev = adapter->netdev; /* For jumbo frames on systems with 4K pages we have to use * an order 1 page, so we might as well increase the size * of our Rx buffer to make better use of the available space */ - rx_buf_len = I40E_RXBUFFER_3072; + rx_buf_len = IAVF_RXBUFFER_3072; /* We use a 1536 buffer size for configurations with * standard Ethernet mtu. On x86 this gives us enough room * for shared info and 192 bytes of padding. 
*/ - if (!I40E_2K_TOO_SMALL_WITH_PADDING && + if (!IAVF_2K_TOO_SMALL_WITH_PADDING && (netdev->mtu <= ETH_DATA_LEN)) - rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; } #endif for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); + adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); adapter->rx_rings[i].rx_buf_len = rx_buf_len; - if (adapter->flags & I40EVF_FLAG_LEGACY_RX) + if (adapter->flags & IAVF_FLAG_LEGACY_RX) clear_ring_build_skb_enabled(&adapter->rx_rings[i]); else set_ring_build_skb_enabled(&adapter->rx_rings[i]); @@ -624,7 +622,7 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) } /** - * i40evf_find_vlan - Search filter list for specific vlan filter + * iavf_find_vlan - Search filter list for specific vlan filter * @adapter: board private structure * @vlan: vlan tag * @@ -632,9 +630,9 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) * mac_vlan_list_lock. **/ static struct -i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f; + struct iavf_vlan_filter *f; list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (vlan == f->vlan) @@ -644,20 +642,20 @@ i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) } /** - * i40evf_add_vlan - Add a vlan filter to the list + * iavf_add_vlan - Add a vlan filter to the list * @adapter: board private structure * @vlan: VLAN tag * * Returns ptr to the filter object or NULL when no memory available. **/ static struct -i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f = NULL; + struct iavf_vlan_filter *f = NULL; spin_lock_bh(&adapter->mac_vlan_list_lock); - f = i40evf_find_vlan(adapter, vlan); + f = iavf_find_vlan(adapter, vlan); if (!f) { f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) @@ -668,7 +666,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) INIT_LIST_HEAD(&f->list); list_add(&f->list, &adapter->vlan_filter_list); f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; } clearout: @@ -677,63 +675,63 @@ clearout: } /** - * i40evf_del_vlan - Remove a vlan filter from the list + * iavf_del_vlan - Remove a vlan filter from the list * @adapter: board private structure * @vlan: VLAN tag **/ -static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) +static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f; + struct iavf_vlan_filter *f; spin_lock_bh(&adapter->mac_vlan_list_lock); - f = i40evf_find_vlan(adapter, vlan); + f = iavf_find_vlan(adapter, vlan); if (f) { f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; } spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** - * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device + * iavf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag **/ -static int i40evf_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int iavf_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) { - 
struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); if (!VLAN_ALLOWED(adapter)) return -EIO; - if (i40evf_add_vlan(adapter, vid) == NULL) + if (iavf_add_vlan(adapter, vid) == NULL) return -ENOMEM; return 0; } /** - * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device + * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag **/ -static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int iavf_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); if (VLAN_ALLOWED(adapter)) { - i40evf_del_vlan(adapter, vid); + iavf_del_vlan(adapter, vid); return 0; } return -EIO; } /** - * i40evf_find_filter - Search filter list for specific mac filter + * iavf_find_filter - Search filter list for specific mac filter * @adapter: board private structure * @macaddr: the MAC address * @@ -741,10 +739,10 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, * mac_vlan_list_lock. **/ static struct -i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, - const u8 *macaddr) +iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, + const u8 *macaddr) { - struct i40evf_mac_filter *f; + struct iavf_mac_filter *f; if (!macaddr) return NULL; @@ -757,22 +755,22 @@ i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, } /** - * i40e_add_filter - Add a mac filter to the filter list + * iavf_add_filter - Add a mac filter to the filter list * @adapter: board private structure * @macaddr: the MAC address * * Returns ptr to the filter object or NULL when no memory available. 
**/ static struct -i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, - const u8 *macaddr) +iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, + const u8 *macaddr) { - struct i40evf_mac_filter *f; + struct iavf_mac_filter *f; if (!macaddr) return NULL; - f = i40evf_find_filter(adapter, macaddr); + f = iavf_find_filter(adapter, macaddr); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) @@ -782,7 +780,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; } @@ -791,17 +789,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, } /** - * i40evf_set_mac - NDO callback to set port mac address + * iavf_set_mac - NDO callback to set port mac address * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ -static int i40evf_set_mac(struct net_device *netdev, void *p) +static int iavf_set_mac(struct net_device *netdev, void *p) { - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - struct i40evf_mac_filter *f; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_hw *hw = &adapter->hw; + struct iavf_mac_filter *f; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) @@ -810,18 +808,18 @@ static int i40evf_set_mac(struct net_device *netdev, void *p) if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) return 0; - if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF) + if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF) return -EPERM; spin_lock_bh(&adapter->mac_vlan_list_lock); - f = i40evf_find_filter(adapter, hw->mac.addr); + f = iavf_find_filter(adapter, hw->mac.addr); if (f) { f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; } - f = i40evf_add_filter(adapter, addr->sa_data); + f = iavf_add_filter(adapter, addr->sa_data); spin_unlock_bh(&adapter->mac_vlan_list_lock); @@ -834,35 +832,35 @@ static int i40evf_set_mac(struct net_device *netdev, void *p) } /** - * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address + * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be added. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. */ -static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr) +static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); - if (i40evf_add_filter(adapter, addr)) + if (iavf_add_filter(adapter, addr)) return 0; else return -ENOMEM; } /** - * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address + * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 
*/ -static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr) +static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) { - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40evf_mac_filter *f; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_mac_filter *f; /* Under some circumstances, we might receive a request to delete * our own device address from our uc list. Because we store the @@ -872,50 +870,50 @@ static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr) if (ether_addr_equal(addr, netdev->dev_addr)) return 0; - f = i40evf_find_filter(adapter, addr); + f = iavf_find_filter(adapter, addr); if (f) { f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; } return 0; } /** - * i40evf_set_rx_mode - NDO callback to set the netdev filters + * iavf_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure **/ -static void i40evf_set_rx_mode(struct net_device *netdev) +static void iavf_set_rx_mode(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); spin_lock_bh(&adapter->mac_vlan_list_lock); - __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); - __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); + __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); + __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); spin_unlock_bh(&adapter->mac_vlan_list_lock); if (netdev->flags & IFF_PROMISC && - !(adapter->flags & I40EVF_FLAG_PROMISC_ON)) - adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC; + !(adapter->flags & IAVF_FLAG_PROMISC_ON)) + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; else if (!(netdev->flags & IFF_PROMISC) && - adapter->flags & I40EVF_FLAG_PROMISC_ON) - adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC; + adapter->flags & IAVF_FLAG_PROMISC_ON) + adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; if (netdev->flags & IFF_ALLMULTI && - !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON)) - adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; else if (!(netdev->flags & IFF_ALLMULTI) && - adapter->flags & I40EVF_FLAG_ALLMULTI_ON) - adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; + adapter->flags & IAVF_FLAG_ALLMULTI_ON) + adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; } /** - * i40evf_napi_enable_all - enable NAPI on all queue vectors + * iavf_napi_enable_all - enable NAPI on all queue vectors * @adapter: board private structure **/ -static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) +static void iavf_napi_enable_all(struct iavf_adapter *adapter) { int q_idx; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { @@ -928,13 +926,13 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) } /** - * i40evf_napi_disable_all - disable NAPI on all queue vectors + * iavf_napi_disable_all - disable NAPI on all queue vectors * @adapter: board private structure **/ -static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) +static void iavf_napi_disable_all(struct iavf_adapter *adapter) { int q_idx; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - 
NONQ_VECS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { @@ -944,67 +942,67 @@ static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) } /** - * i40evf_configure - set up transmit and receive data structures + * iavf_configure - set up transmit and receive data structures * @adapter: board private structure **/ -static void i40evf_configure(struct i40evf_adapter *adapter) +static void iavf_configure(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int i; - i40evf_set_rx_mode(netdev); + iavf_set_rx_mode(netdev); - i40evf_configure_tx(adapter); - i40evf_configure_rx(adapter); - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; + iavf_configure_tx(adapter); + iavf_configure_rx(adapter); + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; for (i = 0; i < adapter->num_active_queues; i++) { - struct i40e_ring *ring = &adapter->rx_rings[i]; + struct iavf_ring *ring = &adapter->rx_rings[i]; - i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); } } /** - * i40evf_up_complete - Finish the last steps of bringing up a connection + * iavf_up_complete - Finish the last steps of bringing up a connection * @adapter: board private structure * - * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ -static void i40evf_up_complete(struct i40evf_adapter *adapter) +static void iavf_up_complete(struct iavf_adapter *adapter) { - adapter->state = __I40EVF_RUNNING; - clear_bit(__I40E_VSI_DOWN, adapter->vsi.state); + adapter->state = __IAVF_RUNNING; + clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); - i40evf_napi_enable_all(adapter); + iavf_napi_enable_all(adapter); - adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES; + adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; if (CLIENT_ENABLED(adapter)) - adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN; + adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } /** - * i40e_down - Shutdown the connection processing + * iavf_down - Shutdown the connection processing * @adapter: board private structure * - * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ -void i40evf_down(struct i40evf_adapter *adapter) +void iavf_down(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct i40evf_vlan_filter *vlf; - struct i40evf_mac_filter *f; - struct i40evf_cloud_filter *cf; + struct iavf_vlan_filter *vlf; + struct iavf_mac_filter *f; + struct iavf_cloud_filter *cf; - if (adapter->state <= __I40EVF_DOWN_PENDING) + if (adapter->state <= __IAVF_DOWN_PENDING) return; netif_carrier_off(netdev); netif_tx_disable(netdev); adapter->link_up = false; - i40evf_napi_disable_all(adapter); - i40evf_irq_disable(adapter); + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter); spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -1031,25 +1029,25 @@ void i40evf_down(struct i40evf_adapter *adapter) } spin_unlock_bh(&adapter->cloud_filter_list_lock); - if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && - adapter->state != __I40EVF_RESETTING) { + if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __IAVF_RESETTING) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. Don't wait * here for this to complete. 
The watchdog is still running * and it will take care of this. */ - adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; + adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } /** - * i40evf_acquire_msix_vectors - Setup the MSIX capability + * iavf_acquire_msix_vectors - Setup the MSIX capability * @adapter: board private structure * @vectors: number of vectors to request * @@ -1058,7 +1056,7 @@ void i40evf_down(struct i40evf_adapter *adapter) * Returns 0 on success, negative on failure **/ static int -i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) +iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) { int err, vector_threshold; @@ -1092,12 +1090,12 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) } /** - * i40evf_free_queues - Free memory for all rings + * iavf_free_queues - Free memory for all rings * @adapter: board private structure to initialize * * Free all of the memory associated with queue pairs. **/ -static void i40evf_free_queues(struct i40evf_adapter *adapter) +static void iavf_free_queues(struct iavf_adapter *adapter) { if (!adapter->vsi_res) return; @@ -1109,14 +1107,14 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter) } /** - * i40evf_alloc_queues - Allocate memory for all rings + * iavf_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * * We allocate one ring per queue at run-time since we don't know the * number of queues at compile-time. The polling_netdev array is * intended for Multiqueue, but should work fine with a single queue. 
**/ -static int i40evf_alloc_queues(struct i40evf_adapter *adapter) +static int iavf_alloc_queues(struct iavf_adapter *adapter) { int i, num_active_queues; @@ -1137,17 +1135,17 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) adapter->tx_rings = kcalloc(num_active_queues, - sizeof(struct i40e_ring), GFP_KERNEL); + sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->tx_rings) goto err_out; adapter->rx_rings = kcalloc(num_active_queues, - sizeof(struct i40e_ring), GFP_KERNEL); + sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->rx_rings) goto err_out; for (i = 0; i < num_active_queues; i++) { - struct i40e_ring *tx_ring; - struct i40e_ring *rx_ring; + struct iavf_ring *tx_ring; + struct iavf_ring *rx_ring; tx_ring = &adapter->tx_rings[i]; @@ -1155,16 +1153,16 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->netdev = adapter->netdev; tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; - tx_ring->itr_setting = I40E_ITR_TX_DEF; - if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE) - tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; + tx_ring->itr_setting = IAVF_ITR_TX_DEF; + if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; - rx_ring->itr_setting = I40E_ITR_RX_DEF; + rx_ring->itr_setting = IAVF_ITR_RX_DEF; } adapter->num_active_queues = num_active_queues; @@ -1172,18 +1170,18 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) return 0; err_out: - i40evf_free_queues(adapter); + iavf_free_queues(adapter); return -ENOMEM; } /** - * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported + * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported * @adapter: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. 
**/ -static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) +static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) { int vector, v_budget; int pairs = 0; @@ -1213,7 +1211,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; - err = i40evf_acquire_msix_vectors(adapter, v_budget); + err = iavf_acquire_msix_vectors(adapter, v_budget); out: netif_set_real_num_rx_queues(adapter->netdev, pairs); @@ -1222,16 +1220,16 @@ out: } /** - * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands + * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands * @adapter: board private structure * * Return 0 on success, negative on failure **/ -static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) +static int iavf_config_rss_aq(struct iavf_adapter *adapter) { struct i40e_aqc_get_set_rss_key_data *rss_key = (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int ret = 0; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -1241,21 +1239,21 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) return -EBUSY; } - ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); + ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); if (ret) { dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + iavf_stat_str(hw, ret), + iavf_aq_str(hw, hw->aq.asq_last_status)); return ret; } - ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false, - adapter->rss_lut, adapter->rss_lut_size); + ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, + adapter->rss_lut, adapter->rss_lut_size); if (ret) { dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + iavf_stat_str(hw, ret), + iavf_aq_str(hw, hw->aq.asq_last_status)); } return ret; @@ -1263,55 +1261,55 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) } /** - * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers + * iavf_config_rss_reg - Configure RSS keys and lut by writing registers * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -static int i40evf_config_rss_reg(struct i40evf_adapter *adapter) +static int iavf_config_rss_reg(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; u32 *dw; u16 i; dw = (u32 *)adapter->rss_key; for (i = 0; i <= adapter->rss_key_size / 4; i++) - wr32(hw, I40E_VFQF_HKEY(i), dw[i]); + wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); dw = (u32 *)adapter->rss_lut; for (i = 0; i <= adapter->rss_lut_size / 4; i++) - wr32(hw, I40E_VFQF_HLUT(i), dw[i]); + wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); - i40e_flush(hw); + iavf_flush(hw); return 0; } /** - * i40evf_config_rss - Configure RSS keys and lut + * iavf_config_rss - Configure RSS keys and lut * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -int i40evf_config_rss(struct i40evf_adapter *adapter) +int iavf_config_rss(struct iavf_adapter *adapter) { if (RSS_PF(adapter)) { - adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT | - I40EVF_FLAG_AQ_SET_RSS_KEY; + adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | + IAVF_FLAG_AQ_SET_RSS_KEY; return 0; } else if (RSS_AQ(adapter)) { - return 
i40evf_config_rss_aq(adapter); + return iavf_config_rss_aq(adapter); } else { - return i40evf_config_rss_reg(adapter); + return iavf_config_rss_reg(adapter); } } /** - * i40evf_fill_rss_lut - Fill the lut with default values + * iavf_fill_rss_lut - Fill the lut with default values * @adapter: board private structure **/ -static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter) +static void iavf_fill_rss_lut(struct iavf_adapter *adapter) { u16 i; @@ -1320,47 +1318,46 @@ static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter) } /** - * i40evf_init_rss - Prepare for RSS + * iavf_init_rss - Prepare for RSS * @adapter: board private structure * * Return 0 on success, negative on failure **/ -static int i40evf_init_rss(struct i40evf_adapter *adapter) +static int iavf_init_rss(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; int ret; if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; + adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; else - adapter->hena = I40E_DEFAULT_RSS_HENA; + adapter->hena = IAVF_DEFAULT_RSS_HENA; - wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); + wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); + wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); } - i40evf_fill_rss_lut(adapter); - + iavf_fill_rss_lut(adapter); netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); - ret = i40evf_config_rss(adapter); + ret = iavf_config_rss(adapter); return ret; } /** - * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors + * iavf_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ -static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) +static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) { int q_idx = 0, num_q_vectors; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), @@ -1376,21 +1373,21 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) q_vector->reg_idx = q_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); netif_napi_add(adapter->netdev, &q_vector->napi, - i40evf_napi_poll, NAPI_POLL_WEIGHT); + iavf_napi_poll, NAPI_POLL_WEIGHT); } return 0; } /** - * i40evf_free_q_vectors - Free memory allocated for interrupt vectors + * iavf_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. 
**/ -static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) +static void iavf_free_q_vectors(struct iavf_adapter *adapter) { int q_idx, num_q_vectors; int napi_vectors; @@ -1402,7 +1399,8 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) napi_vectors = adapter->num_active_queues; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; + struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; + if (q_idx < napi_vectors) netif_napi_del(&q_vector->napi); } @@ -1411,11 +1409,11 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) } /** - * i40evf_reset_interrupt_capability - Reset MSIX setup + * iavf_reset_interrupt_capability - Reset MSIX setup * @adapter: board private structure * **/ -void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter) +void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) { if (!adapter->msix_entries) return; @@ -1426,15 +1424,15 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter) } /** - * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init + * iavf_init_interrupt_scheme - Determine if MSIX is supported and init * @adapter: board private structure to initialize * **/ -int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) +int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) { int err; - err = i40evf_alloc_queues(adapter); + err = iavf_alloc_queues(adapter); if (err) { dev_err(&adapter->pdev->dev, "Unable to allocate memory for queues\n"); @@ -1442,7 +1440,7 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) } rtnl_lock(); - err = i40evf_set_interrupt_capability(adapter); + err = iavf_set_interrupt_capability(adapter); rtnl_unlock(); if (err) { dev_err(&adapter->pdev->dev, @@ -1450,7 +1448,7 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) goto err_set_interrupt; } - err = i40evf_alloc_q_vectors(adapter); + err = iavf_alloc_q_vectors(adapter); if (err) { dev_err(&adapter->pdev->dev, "Unable to allocate memory for queue vectors\n"); @@ -1473,18 +1471,18 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) return 0; err_alloc_q_vectors: - i40evf_reset_interrupt_capability(adapter); + iavf_reset_interrupt_capability(adapter); err_set_interrupt: - i40evf_free_queues(adapter); + iavf_free_queues(adapter); err_alloc_queues: return err; } /** - * i40evf_free_rss - Free memory used by RSS structs + * iavf_free_rss - Free memory used by RSS structs * @adapter: board private structure **/ -static void i40evf_free_rss(struct i40evf_adapter *adapter) +static void iavf_free_rss(struct iavf_adapter *adapter) { kfree(adapter->rss_key); adapter->rss_key = NULL; @@ -1494,52 +1492,52 @@ static void i40evf_free_rss(struct i40evf_adapter *adapter) } /** - * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors + * iavf_reinit_interrupt_scheme - Reallocate queues and vectors * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter) +static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; if (netif_running(netdev)) - i40evf_free_traffic_irqs(adapter); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_q_vectors(adapter); - i40evf_free_queues(adapter); + iavf_free_traffic_irqs(adapter); + iavf_free_misc_irq(adapter); + 
iavf_reset_interrupt_capability(adapter); + iavf_free_q_vectors(adapter); + iavf_free_queues(adapter); - err = i40evf_init_interrupt_scheme(adapter); + err = iavf_init_interrupt_scheme(adapter); if (err) goto err; netif_tx_stop_all_queues(netdev); - err = i40evf_request_misc_irq(adapter); + err = iavf_request_misc_irq(adapter); if (err) goto err; - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); - i40evf_map_rings_to_vectors(adapter); + iavf_map_rings_to_vectors(adapter); if (RSS_AQ(adapter)) - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; else - err = i40evf_init_rss(adapter); + err = iavf_init_rss(adapter); err: return err; } /** - * i40evf_watchdog_timer - Periodic call-back timer + * iavf_watchdog_timer - Periodic call-back timer * @data: pointer to adapter disguised as unsigned long **/ -static void i40evf_watchdog_timer(struct timer_list *t) +static void iavf_watchdog_timer(struct timer_list *t) { - struct i40evf_adapter *adapter = from_timer(adapter, t, + struct iavf_adapter *adapter = from_timer(adapter, t, watchdog_timer); schedule_work(&adapter->watchdog_task); @@ -1547,31 +1545,31 @@ static void i40evf_watchdog_timer(struct timer_list *t) } /** - * i40evf_watchdog_task - Periodic call-back task + * iavf_watchdog_task - Periodic call-back task * @work: pointer to work_struct **/ -static void i40evf_watchdog_task(struct work_struct *work) +static void iavf_watchdog_task(struct work_struct *work) { - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, + struct iavf_adapter *adapter = container_of(work, + struct iavf_adapter, watchdog_task); - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; u32 reg_val; - if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) + if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) goto restart_watchdog; - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { - reg_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { + reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if ((reg_val == VIRTCHNL_VFR_VFACTIVE) || (reg_val == VIRTCHNL_VFR_COMPLETED)) { /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __I40EVF_STARTUP; - adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; + adapter->state = __IAVF_STARTUP; + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; schedule_delayed_work(&adapter->init_task, 10); - clear_bit(__I40EVF_IN_CRITICAL_TASK, + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); /* Don't reschedule the watchdog, since we've restarted * the init task. 
When init_task contacts the PF and @@ -1585,15 +1583,15 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } - if ((adapter->state < __I40EVF_DOWN) || - (adapter->flags & I40EVF_FLAG_RESET_PENDING)) + if ((adapter->state < __IAVF_DOWN) || + (adapter->flags & IAVF_FLAG_RESET_PENDING)) goto watchdog_done; /* check for reset */ - reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK; - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) { - adapter->state = __I40EVF_RESETTING; - adapter->flags |= I40EVF_FLAG_RESET_PENDING; + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; + if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) { + adapter->state = __IAVF_RESETTING; + adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); schedule_work(&adapter->reset_task); adapter->aq_required = 0; @@ -1605,140 +1603,140 @@ static void i40evf_watchdog_task(struct work_struct *work) * here so we don't race on the admin queue. */ if (adapter->current_op) { - if (!i40evf_asq_done(hw)) { + if (!iavf_asq_done(hw)) { dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); - i40evf_send_api_ver(adapter); + iavf_send_api_ver(adapter); } goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) { - i40evf_send_vf_config_msg(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) { + iavf_send_vf_config_msg(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { - i40evf_disable_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { + iavf_disable_queues(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { - i40evf_map_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { + iavf_map_queues(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) { - i40evf_add_ether_addrs(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { + iavf_add_ether_addrs(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) { - i40evf_add_vlans(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { + iavf_add_vlans(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) { - i40evf_del_ether_addrs(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { + iavf_del_ether_addrs(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) { - i40evf_del_vlans(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { + iavf_del_vlans(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { - i40evf_enable_vlan_stripping(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { + iavf_enable_vlan_stripping(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { - i40evf_disable_vlan_stripping(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { + iavf_disable_vlan_stripping(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) { - i40evf_configure_queues(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { + iavf_configure_queues(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) { - i40evf_enable_queues(adapter); + if (adapter->aq_required & 
IAVF_FLAG_AQ_ENABLE_QUEUES) { + iavf_enable_queues(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) { + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { /* This message goes straight to the firmware, not the * PF, so we don't have to set current_op as we will * not get a response through the ARQ. */ - i40evf_init_rss(adapter); - adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; + iavf_init_rss(adapter); + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) { - i40evf_get_hena(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { + iavf_get_hena(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) { - i40evf_set_hena(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { + iavf_set_hena(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) { - i40evf_set_rss_key(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { + iavf_set_rss_key(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) { - i40evf_set_rss_lut(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { + iavf_set_rss_lut(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { - i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { + iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { - i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { + iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); goto watchdog_done; } - if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) && - (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) { - i40evf_set_promiscuous(adapter, 0); + if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && + (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { + iavf_set_promiscuous(adapter, 0); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) { - i40evf_enable_channels(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { + iavf_enable_channels(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) { - i40evf_disable_channels(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { + iavf_disable_channels(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) { - i40evf_add_cloud_filter(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { + iavf_add_cloud_filter(adapter); goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) { - i40evf_del_cloud_filter(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { + iavf_del_cloud_filter(adapter); goto watchdog_done; } schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); - if (adapter->state == __I40EVF_RUNNING) - i40evf_request_stats(adapter); + if (adapter->state == __IAVF_RUNNING) + iavf_request_stats(adapter); watchdog_done: - if (adapter->state == __I40EVF_RUNNING) - i40evf_detect_recover_hung(&adapter->vsi); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + if (adapter->state == __IAVF_RUNNING) + iavf_detect_recover_hung(&adapter->vsi); + 
clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); restart_watchdog: - if (adapter->state == __I40EVF_REMOVE) + if (adapter->state == __IAVF_REMOVE) return; if (adapter->aq_required) mod_timer(&adapter->watchdog_timer, @@ -1748,28 +1746,28 @@ restart_watchdog: schedule_work(&adapter->adminq_task); } -static void i40evf_disable_vf(struct i40evf_adapter *adapter) +static void iavf_disable_vf(struct iavf_adapter *adapter) { - struct i40evf_mac_filter *f, *ftmp; - struct i40evf_vlan_filter *fv, *fvtmp; - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_mac_filter *f, *ftmp; + struct iavf_vlan_filter *fv, *fvtmp; + struct iavf_cloud_filter *cf, *cftmp; - adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; /* We don't use netif_running() because it may be true prior to * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - if (adapter->state == __I40EVF_RUNNING) { - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + if (adapter->state == __IAVF_RUNNING) { + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); adapter->link_up = false; - i40evf_napi_disable_all(adapter); - i40evf_irq_disable(adapter); - i40evf_free_traffic_irqs(adapter); - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter); + iavf_free_traffic_irqs(adapter); + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); } spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -1795,41 +1793,41 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) } spin_unlock_bh(&adapter->cloud_filter_list_lock); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_queues(adapter); - i40evf_free_q_vectors(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_queues(adapter); + iavf_free_q_vectors(adapter); kfree(adapter->vf_res); - i40evf_shutdown_adminq(&adapter->hw); + iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - adapter->state = __I40EVF_DOWN; + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; + adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } -#define I40EVF_RESET_WAIT_MS 10 -#define I40EVF_RESET_WAIT_COUNT 500 +#define IAVF_RESET_WAIT_MS 10 +#define IAVF_RESET_WAIT_COUNT 500 /** - * i40evf_reset_task - Call-back task to handle hardware reset + * iavf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct * * During reset we need to shut down and reinitialize the admin queue * before we can use it to communicate with the PF again. We also clear * and reinit the rings because that context is lost as well. 
**/ -static void i40evf_reset_task(struct work_struct *work) +static void iavf_reset_task(struct work_struct *work) { - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, + struct iavf_adapter *adapter = container_of(work, + struct iavf_adapter, reset_task); struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; - struct i40e_hw *hw = &adapter->hw; - struct i40evf_vlan_filter *vlf; - struct i40evf_cloud_filter *cf; - struct i40evf_mac_filter *f; + struct iavf_hw *hw = &adapter->hw; + struct iavf_vlan_filter *vlf; + struct iavf_cloud_filter *cf; + struct iavf_mac_filter *f; u32 reg_val; int i = 0, err; bool running; @@ -1837,63 +1835,63 @@ static void i40evf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section)) + if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return; - while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, + while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { - adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN | - I40EVF_FLAG_CLIENT_NEEDS_CLOSE | - I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS | - I40EVF_FLAG_SERVICE_CLIENT_REQUESTED); + adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | + IAVF_FLAG_CLIENT_NEEDS_CLOSE | + IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | + IAVF_FLAG_SERVICE_CLIENT_REQUESTED); cancel_delayed_work_sync(&adapter->client_task); - i40evf_notify_client_close(&adapter->vsi, true); + iavf_notify_client_close(&adapter->vsi, true); } - i40evf_misc_irq_disable(adapter); - if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { - adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; + iavf_misc_irq_disable(adapter); + if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { + adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; /* Restart the AQ here. If we have been reset but didn't * detect it, or if the PF had to reinit, our AQ will be hosed. 
*/ - i40evf_shutdown_adminq(hw); - i40evf_init_adminq(hw); - i40evf_request_reset(adapter); + iavf_shutdown_adminq(hw); + iavf_init_adminq(hw); + iavf_request_reset(adapter); } - adapter->flags |= I40EVF_FLAG_RESET_PENDING; + adapter->flags |= IAVF_FLAG_RESET_PENDING; /* poll until we see the reset actually happen */ - for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { - reg_val = rd32(hw, I40E_VF_ARQLEN1) & - I40E_VF_ARQLEN1_ARQENABLE_MASK; + for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & + IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) break; usleep_range(5000, 10000); } - if (i == I40EVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_COUNT) { dev_info(&adapter->pdev->dev, "Never saw reset\n"); goto continue_reset; /* act like the reset happened */ } /* wait until the reset is complete and the PF is responding to us */ - for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { /* sleep first to make sure a minimum wait time is met */ - msleep(I40EVF_RESET_WAIT_MS); + msleep(IAVF_RESET_WAIT_MS); - reg_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if (reg_val == VIRTCHNL_VFR_VFACTIVE) break; } pci_set_master(adapter->pdev); - if (i == I40EVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); - i40evf_disable_vf(adapter); - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + iavf_disable_vf(adapter); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -1902,44 +1900,44 @@ continue_reset: * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. 
*/ - running = ((adapter->state == __I40EVF_RUNNING) || - (adapter->state == __I40EVF_RESETTING)); + running = ((adapter->state == __IAVF_RUNNING) || + (adapter->state == __IAVF_RESETTING)); if (running) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; - i40evf_napi_disable_all(adapter); + iavf_napi_disable_all(adapter); } - i40evf_irq_disable(adapter); + iavf_irq_disable(adapter); - adapter->state = __I40EVF_RESETTING; - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + adapter->state = __IAVF_RESETTING; + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; /* free the Tx/Rx rings and descriptors, might be better to just * re-use them sometime in the future */ - i40evf_free_all_rx_resources(adapter); - i40evf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_free_all_tx_resources(adapter); - adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED; + adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; /* kill and reinit the admin queue */ - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); adapter->current_op = VIRTCHNL_OP_UNKNOWN; - err = i40evf_init_adminq(hw); + err = iavf_init_adminq(hw); if (err) dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", err); adapter->aq_required = 0; - if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { - err = i40evf_reinit_interrupt_scheme(adapter); + if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + err = iavf_reinit_interrupt_scheme(adapter); if (err) goto reset_err; } - adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG; - adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -1964,10 +1962,10 @@ continue_reset: } spin_unlock_bh(&adapter->cloud_filter_list_lock); - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; - i40evf_misc_irq_enable(adapter); + adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; + iavf_misc_irq_enable(adapter); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -1976,84 +1974,83 @@ continue_reset: */ if (running) { /* allocate transmit descriptors */ - err = i40evf_setup_all_tx_resources(adapter); + err = iavf_setup_all_tx_resources(adapter); if (err) goto reset_err; /* allocate receive descriptors */ - err = i40evf_setup_all_rx_resources(adapter); + err = iavf_setup_all_rx_resources(adapter); if (err) goto reset_err; - if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { - err = i40evf_request_traffic_irqs(adapter, - netdev->name); + if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto reset_err; - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; } - i40evf_configure(adapter); + iavf_configure(adapter); - i40evf_up_complete(adapter); + iavf_up_complete(adapter); - i40evf_irq_enable(adapter, true); + iavf_irq_enable(adapter, true); } else { - adapter->state = __I40EVF_DOWN; + adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); } - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, 
&adapter->crit_section); return; reset_err: - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); - i40evf_close(netdev); + iavf_close(netdev); } /** - * i40evf_adminq_task - worker thread to clean the admin queue + * iavf_adminq_task - worker thread to clean the admin queue * @work: pointer to work_struct containing our data **/ -static void i40evf_adminq_task(struct work_struct *work) +static void iavf_adminq_task(struct work_struct *work) { - struct i40evf_adapter *adapter = - container_of(work, struct i40evf_adapter, adminq_task); - struct i40e_hw *hw = &adapter->hw; + struct iavf_adapter *adapter = + container_of(work, struct iavf_adapter, adminq_task); + struct iavf_hw *hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops v_op; - i40e_status ret, v_ret; + iavf_status ret, v_ret; u32 val, oldval; u16 pending; - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) goto out; - event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) goto out; do { - ret = i40evf_clean_arq_element(hw, &event, &pending); + ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low); + v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low); if (ret || !v_op) break; /* No event to process or error cleaning ARQ */ - i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, - event.msg_len); + iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, + event.msg_len); if (pending != 0) - memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); + memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); } while (pending); if ((adapter->flags & - (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) || - adapter->state == __I40EVF_RESETTING) + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || + adapter->state == __IAVF_RESETTING) goto freedom; /* check for error indications */ @@ -2061,34 +2058,34 @@ static void i40evf_adminq_task(struct work_struct *work) if (val == 0xdeadbeef) /* indicates device in reset */ goto freedom; oldval = val; - if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { + if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; + val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; } - if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) { + if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; + val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; } - if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) { + if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; + val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.arq.len, val); val = rd32(hw, hw->aq.asq.len); oldval = val; - if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) { + if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; + val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; } - if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) { + 
if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; + val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; } - if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) { + if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; + val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.asq.len, val); @@ -2097,58 +2094,58 @@ freedom: kfree(event.msg_buf); out: /* re-enable Admin queue interrupt cause */ - i40evf_misc_irq_enable(adapter); + iavf_misc_irq_enable(adapter); } /** - * i40evf_client_task - worker thread to perform client work + * iavf_client_task - worker thread to perform client work * @work: pointer to work_struct containing our data * * This task handles client interactions. Because client calls can be * reentrant, we can't handle them in the watchdog. **/ -static void i40evf_client_task(struct work_struct *work) +static void iavf_client_task(struct work_struct *work) { - struct i40evf_adapter *adapter = - container_of(work, struct i40evf_adapter, client_task.work); + struct iavf_adapter *adapter = + container_of(work, struct iavf_adapter, client_task.work); /* If we can't get the client bit, just give up. We'll be rescheduled * later. */ - if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) + if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) return; - if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) { - i40evf_client_subtask(adapter); - adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { + iavf_client_subtask(adapter); + adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; goto out; } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { - i40evf_notify_client_l2_params(&adapter->vsi); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS; + if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { + iavf_notify_client_l2_params(&adapter->vsi); + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; goto out; } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) { - i40evf_notify_client_close(&adapter->vsi, false); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE; + if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { + iavf_notify_client_close(&adapter->vsi, false); + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; goto out; } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) { - i40evf_notify_client_open(&adapter->vsi); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN; + if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { + iavf_notify_client_open(&adapter->vsi); + adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; } out: - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); } /** - * i40evf_free_all_tx_resources - Free Tx Resources for All Queues + * iavf_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources **/ -void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) +void iavf_free_all_tx_resources(struct iavf_adapter *adapter) { int i; @@ -2157,11 +2154,11 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) if (adapter->tx_rings[i].desc) - i40evf_free_tx_resources(&adapter->tx_rings[i]); + iavf_free_tx_resources(&adapter->tx_rings[i]); } /** - * 
i40evf_setup_all_tx_resources - allocate all queues Tx resources + * iavf_setup_all_tx_resources - allocate all queues Tx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or @@ -2170,13 +2167,13 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) * * Return 0 on success, negative on failure **/ -static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) +static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { adapter->tx_rings[i].count = adapter->tx_desc_count; - err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]); + err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -2188,7 +2185,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) } /** - * i40evf_setup_all_rx_resources - allocate all queues Rx resources + * iavf_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or @@ -2197,13 +2194,13 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) * * Return 0 on success, negative on failure **/ -static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) +static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].count = adapter->rx_desc_count; - err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]); + err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, @@ -2214,12 +2211,12 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) } /** - * i40evf_free_all_rx_resources - Free Rx Resources for All Queues + * iavf_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources **/ -void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) +void iavf_free_all_rx_resources(struct iavf_adapter *adapter) { int i; @@ -2228,16 +2225,16 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) if (adapter->rx_rings[i].desc) - i40evf_free_rx_resources(&adapter->rx_rings[i]); + iavf_free_rx_resources(&adapter->rx_rings[i]); } /** - * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth + * iavf_validate_tx_bandwidth - validate the max Tx bandwidth * @adapter: board private structure * @max_tx_rate: max Tx bw for a tc **/ -static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter, - u64 max_tx_rate) +static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, + u64 max_tx_rate) { int speed = 0, ret = 0; @@ -2274,7 +2271,7 @@ static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter, } /** - * i40evf_validate_channel_config - validate queue mapping info + * iavf_validate_channel_config - validate queue mapping info * @adapter: board private structure * @mqprio_qopt: queue parameters * @@ -2282,15 +2279,15 @@ static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter, * configure queue channels is valid or not. Returns 0 on a valid * config. 
**/ -static int i40evf_validate_ch_config(struct i40evf_adapter *adapter, - struct tc_mqprio_qopt_offload *mqprio_qopt) +static int iavf_validate_ch_config(struct iavf_adapter *adapter, + struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 total_max_rate = 0; int i, num_qps = 0; u64 tx_rate = 0; int ret = 0; - if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS || + if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || mqprio_qopt->qopt.num_tc < 1) return -EINVAL; @@ -2305,24 +2302,24 @@ static int i40evf_validate_ch_config(struct i40evf_adapter *adapter, } /*convert to Mbps */ tx_rate = div_u64(mqprio_qopt->max_rate[i], - I40EVF_MBPS_DIVISOR); + IAVF_MBPS_DIVISOR); total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } - if (num_qps > I40EVF_MAX_REQ_QUEUES) + if (num_qps > IAVF_MAX_REQ_QUEUES) return -EINVAL; - ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate); + ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); return ret; } /** - * i40evf_del_all_cloud_filters - delete all cloud filters + * iavf_del_all_cloud_filters - delete all cloud filters * on the traffic classes **/ -static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter) +static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; spin_lock_bh(&adapter->cloud_filter_list_lock); list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, @@ -2335,7 +2332,7 @@ static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter) } /** - * __i40evf_setup_tc - configure multiple traffic classes + * __iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type_date: tc offload data * @@ -2345,10 +2342,10 @@ static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter) * * Returns 0 on success. 
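The channel validation above is mostly arithmetic: each requested traffic class must bring at least one queue pair, the per-class byte rates are folded into an aggregate Mbit/s figure, and the total queue count is bounded. A stand-alone sketch of that arithmetic follows; the divisor and the class/queue limits are illustrative stand-ins for the driver's IAVF_MBPS_DIVISOR, IAVF_MAX_TRAFFIC_CLASS and IAVF_MAX_REQ_QUEUES constants rather than their real values.

#include <stdint.h>

/* Illustrative limits; the driver's own constants may differ. */
#define MBPS_DIVISOR      125000ULL   /* bytes/s in one Mbit/s (assumed) */
#define MAX_TRAFFIC_CLASS 4
#define MAX_REQ_QUEUES    4

struct tc_request {
	uint16_t count;      /* queue pairs requested for this class */
	uint64_t max_rate;   /* bytes per second, 0 means unlimited  */
};

/* Returns the aggregate rate in Mbit/s, or -1 if the request is malformed.
 * The caller still has to compare the result against the negotiated link
 * speed, which is what the bandwidth check in the driver does next. */
long long validate_channels(const struct tc_request *tcs, int num_tc)
{
	uint64_t total_mbps = 0;
	unsigned int num_qps = 0;
	int i;

	if (num_tc < 1 || num_tc > MAX_TRAFFIC_CLASS)
		return -1;

	for (i = 0; i < num_tc; i++) {
		if (!tcs[i].count)
			return -1;                 /* every class needs queues */
		total_mbps += tcs[i].max_rate / MBPS_DIVISOR;
		num_qps += tcs[i].count;
	}

	return num_qps > MAX_REQ_QUEUES ? -1 : (long long)total_mbps;
}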
**/ -static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) +static int __iavf_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); struct virtchnl_vf_resource *vfres = adapter->vf_res; u8 num_tc = 0, total_qps = 0; int ret = 0, netdev_tc = 0; @@ -2361,14 +2358,14 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) /* delete queue_channel */ if (!mqprio_qopt->qopt.hw) { - if (adapter->ch_config.state == __I40EVF_TC_RUNNING) { + if (adapter->ch_config.state == __IAVF_TC_RUNNING) { /* reset the tc configuration */ netdev_reset_tc(netdev); adapter->num_tc = 0; netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); - i40evf_del_all_cloud_filters(adapter); - adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS; + iavf_del_all_cloud_filters(adapter); + adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; goto exit; } else { return -EINVAL; @@ -2381,12 +2378,12 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) dev_err(&adapter->pdev->dev, "ADq not supported\n"); return -EOPNOTSUPP; } - if (adapter->ch_config.state != __I40EVF_TC_INVALID) { + if (adapter->ch_config.state != __IAVF_TC_INVALID) { dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); return -EINVAL; } - ret = i40evf_validate_ch_config(adapter, mqprio_qopt); + ret = iavf_validate_ch_config(adapter, mqprio_qopt); if (ret) return ret; /* Return if same TC config is requested */ @@ -2394,7 +2391,7 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) return 0; adapter->num_tc = num_tc; - for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { if (i < num_tc) { adapter->ch_config.ch_info[i].count = mqprio_qopt->qopt.count[i]; @@ -2404,7 +2401,7 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) max_tx_rate = mqprio_qopt->max_rate[i]; /* convert to Mbps */ max_tx_rate = div_u64(max_tx_rate, - I40EVF_MBPS_DIVISOR); + IAVF_MBPS_DIVISOR); adapter->ch_config.ch_info[i].max_tx_rate = max_tx_rate; } else { @@ -2415,11 +2412,11 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) adapter->ch_config.total_qps = total_qps; netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); - adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS; + adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; netdev_reset_tc(netdev); /* Report the tc mapping up the stack */ netdev_set_num_tc(adapter->netdev, num_tc); - for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { u16 qcount = mqprio_qopt->qopt.count[i]; u16 qoffset = mqprio_qopt->qopt.offset[i]; @@ -2433,14 +2430,14 @@ exit: } /** - * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel + * iavf_parse_cls_flower - Parse tc flower filters provided by kernel * @adapter: board private structure * @cls_flower: pointer to struct tc_cls_flower_offload * @filter: pointer to cloud filter structure */ -static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *f, - struct i40evf_cloud_filter *filter) +static int iavf_parse_cls_flower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *f, + struct iavf_cloud_filter *filter) { u16 n_proto_mask = 0; u16 n_proto_key = 0; @@ -2471,7 +2468,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter 
*adapter, f->mask); if (mask->keyid != 0) - field_flags |= I40EVF_CLOUD_FIELD_TEN_ID; + field_flags |= IAVF_CLOUD_FIELD_TEN_ID; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { @@ -2518,7 +2515,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, /* use is_broadcast and is_zero to check for all 0xf or 0 */ if (!is_zero_ether_addr(mask->dst)) { if (is_broadcast_ether_addr(mask->dst)) { - field_flags |= I40EVF_CLOUD_FIELD_OMAC; + field_flags |= IAVF_CLOUD_FIELD_OMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", mask->dst); @@ -2528,7 +2525,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (!is_zero_ether_addr(mask->src)) { if (is_broadcast_ether_addr(mask->src)) { - field_flags |= I40EVF_CLOUD_FIELD_IMAC; + field_flags |= IAVF_CLOUD_FIELD_IMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", mask->src); @@ -2569,7 +2566,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->vlan_id) { if (mask->vlan_id == VLAN_VID_MASK) { - field_flags |= I40EVF_CLOUD_FIELD_IVLAN; + field_flags |= IAVF_CLOUD_FIELD_IVLAN; } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", mask->vlan_id); @@ -2601,7 +2598,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->dst) { if (mask->dst == cpu_to_be32(0xffffffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(mask->dst)); @@ -2611,7 +2608,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->src) { if (mask->src == cpu_to_be32(0xffffffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(mask->dst)); @@ -2619,7 +2616,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, } } - if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) { + if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); return I40E_ERR_CONFIG; } @@ -2660,7 +2657,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, return I40E_ERR_CONFIG; } if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; for (i = 0; i < 4; i++) vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); @@ -2683,7 +2680,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->src) { if (mask->src == cpu_to_be16(0xffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", be16_to_cpu(mask->src)); @@ -2693,7 +2690,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, if (mask->dst) { if (mask->dst == cpu_to_be16(0xffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; + field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(mask->dst)); @@ -2716,13 +2713,13 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, } /** - * i40evf_handle_tclass - Forward to a traffic class on the device + * iavf_handle_tclass - Forward to a traffic class on the device * @adapter: board private structure * @tc: traffic class index on the device * @filter: pointer to cloud filter structure */ -static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc, - 
struct i40evf_cloud_filter *filter) +static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, + struct iavf_cloud_filter *filter) { if (tc == 0) return 0; @@ -2740,15 +2737,15 @@ static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc, } /** - * i40evf_configure_clsflower - Add tc flower filters + * iavf_configure_clsflower - Add tc flower filters * @adapter: board private structure * @cls_flower: Pointer to struct tc_cls_flower_offload */ -static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) +static int iavf_configure_clsflower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) { int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); - struct i40evf_cloud_filter *filter = NULL; + struct iavf_cloud_filter *filter = NULL; int err = -EINVAL, count = 50; if (tc < 0) { @@ -2760,7 +2757,7 @@ static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, if (!filter) return -ENOMEM; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) { if (--count == 0) goto err; @@ -2773,11 +2770,11 @@ static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); /* start out with flow type and eth type IPv4 to begin with */ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; - err = i40evf_parse_cls_flower(adapter, cls_flower, filter); + err = iavf_parse_cls_flower(adapter, cls_flower, filter); if (err < 0) goto err; - err = i40evf_handle_tclass(adapter, tc, filter); + err = iavf_handle_tclass(adapter, tc, filter); if (err < 0) goto err; @@ -2786,27 +2783,27 @@ static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, list_add_tail(&filter->list, &adapter->cloud_filter_list); adapter->num_cloud_filters++; filter->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; spin_unlock_bh(&adapter->cloud_filter_list_lock); err: if (err) kfree(filter); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); return err; } -/* i40evf_find_cf - Find the cloud filter in the list +/* iavf_find_cf - Find the cloud filter in the list * @adapter: Board private structure * @cookie: filter specific cookie * * Returns ptr to the filter object or NULL. Must be called while holding the * cloud_filter_list_lock. 
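The lookup helper that follows is a plain search keyed on the cookie the flower classifier hands back; its only subtlety is the locking contract spelled out in its comment. A minimal sketch of the same idea, using an ordinary singly linked list and an unsigned long cookie in place of the driver's list_head and cookie field, might look like this:

struct cloud_filter {
	struct cloud_filter *next;
	unsigned long cookie;   /* identifier supplied by the flower classifier */
	int del;                /* set when the filter is queued for deletion   */
};

/* Walk the list and return the filter matching @cookie, or NULL if none.
 * As with the driver's version, the caller is expected to hold whatever
 * lock protects the list for the whole lookup. */
struct cloud_filter *find_filter(struct cloud_filter *head, unsigned long cookie)
{
	struct cloud_filter *f;

	for (f = head; f; f = f->next)
		if (f->cookie == cookie)
			return f;
	return (struct cloud_filter *)0;
}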
*/ -static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter, - unsigned long *cookie) +static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, + unsigned long *cookie) { - struct i40evf_cloud_filter *filter = NULL; + struct iavf_cloud_filter *filter = NULL; if (!cookie) return NULL; @@ -2819,21 +2816,21 @@ static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter } /** - * i40evf_delete_clsflower - Remove tc flower filters + * iavf_delete_clsflower - Remove tc flower filters * @adapter: board private structure * @cls_flower: Pointer to struct tc_cls_flower_offload */ -static int i40evf_delete_clsflower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) +static int iavf_delete_clsflower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) { - struct i40evf_cloud_filter *filter = NULL; + struct iavf_cloud_filter *filter = NULL; int err = 0; spin_lock_bh(&adapter->cloud_filter_list_lock); - filter = i40evf_find_cf(adapter, &cls_flower->cookie); + filter = iavf_find_cf(adapter, &cls_flower->cookie); if (filter) { filter->del = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; } else { err = -EINVAL; } @@ -2843,21 +2840,21 @@ static int i40evf_delete_clsflower(struct i40evf_adapter *adapter, } /** - * i40evf_setup_tc_cls_flower - flower classifier offloads + * iavf_setup_tc_cls_flower - flower classifier offloads * @netdev: net device to configure * @type_data: offload data */ -static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) +static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) { if (cls_flower->common.chain_index) return -EOPNOTSUPP; switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return i40evf_configure_clsflower(adapter, cls_flower); + return iavf_configure_clsflower(adapter, cls_flower); case TC_CLSFLOWER_DESTROY: - return i40evf_delete_clsflower(adapter, cls_flower); + return iavf_delete_clsflower(adapter, cls_flower); case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: @@ -2866,46 +2863,46 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, } /** - * i40evf_setup_tc_block_cb - block callback for tc + * iavf_setup_tc_block_cb - block callback for tc * @type: type of offload * @type_data: offload data * @cb_priv: * * This function is the block callback for traffic classes **/ -static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) +static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) { switch (type) { case TC_SETUP_CLSFLOWER: - return i40evf_setup_tc_cls_flower(cb_priv, type_data); + return iavf_setup_tc_cls_flower(cb_priv, type_data); default: return -EOPNOTSUPP; } } /** - * i40evf_setup_tc_block - register callbacks for tc + * iavf_setup_tc_block - register callbacks for tc * @netdev: network interface device structure * @f: tc offload data * * This function registers block callbacks for tc * offloads **/ -static int i40evf_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) +static int iavf_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) { - struct i40evf_adapter *adapter = netdev_priv(dev); + struct iavf_adapter *adapter = netdev_priv(dev); if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; switch 
(f->command) { case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, + return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb, adapter, adapter, f->extack); case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, + tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb, adapter); return 0; default: @@ -2914,7 +2911,7 @@ static int i40evf_setup_tc_block(struct net_device *dev, } /** - * i40evf_setup_tc - configure multiple traffic classes + * iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type: type of offload * @type_date: tc offload data @@ -2924,21 +2921,21 @@ static int i40evf_setup_tc_block(struct net_device *dev, * * Returns 0 on success **/ -static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type, - void *type_data) +static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) { switch (type) { case TC_SETUP_QDISC_MQPRIO: - return __i40evf_setup_tc(netdev, type_data); + return __iavf_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: - return i40evf_setup_tc_block(netdev, type_data); + return iavf_setup_tc_block(netdev, type_data); default: return -EOPNOTSUPP; } } /** - * i40evf_open - Called when a network interface is made active + * iavf_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure @@ -2949,71 +2946,71 @@ static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type, * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ -static int i40evf_open(struct net_device *netdev) +static int iavf_open(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); int err; - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; } - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000); - if (adapter->state != __I40EVF_DOWN) { + if (adapter->state != __IAVF_DOWN) { err = -EBUSY; goto err_unlock; } /* allocate transmit descriptors */ - err = i40evf_setup_all_tx_resources(adapter); + err = iavf_setup_all_tx_resources(adapter); if (err) goto err_setup_tx; /* allocate receive descriptors */ - err = i40evf_setup_all_rx_resources(adapter); + err = iavf_setup_all_rx_resources(adapter); if (err) goto err_setup_rx; /* clear any pending interrupts, may auto mask */ - err = i40evf_request_traffic_irqs(adapter, netdev->name); + err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto err_req_irq; spin_lock_bh(&adapter->mac_vlan_list_lock); - i40evf_add_filter(adapter, adapter->hw.mac.addr); + iavf_add_filter(adapter, adapter->hw.mac.addr); spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_configure(adapter); + iavf_configure(adapter); - i40evf_up_complete(adapter); + iavf_up_complete(adapter); - i40evf_irq_enable(adapter, true); + iavf_irq_enable(adapter, true); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); return 0; err_req_irq: - i40evf_down(adapter); - i40evf_free_traffic_irqs(adapter); + iavf_down(adapter); + 
iavf_free_traffic_irqs(adapter); err_setup_rx: - i40evf_free_all_rx_resources(adapter); + iavf_free_all_rx_resources(adapter); err_setup_tx: - i40evf_free_all_tx_resources(adapter); + iavf_free_all_tx_resources(adapter); err_unlock: - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); return err; } /** - * i40evf_close - Disables a network interface + * iavf_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail @@ -3023,41 +3020,41 @@ err_unlock: * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) * are freed, along with all transmit and receive resources. **/ -static int i40evf_close(struct net_device *netdev) +static int iavf_close(struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); int status; - if (adapter->state <= __I40EVF_DOWN_PENDING) + if (adapter->state <= __IAVF_DOWN_PENDING) return 0; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000); - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) - adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE; + adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; - i40evf_down(adapter); - adapter->state = __I40EVF_DOWN_PENDING; - i40evf_free_traffic_irqs(adapter); + iavf_down(adapter); + adapter->state = __IAVF_DOWN_PENDING; + iavf_free_traffic_irqs(adapter); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. Resources are cleared in - * i40evf_virtchnl_completion() after we get confirmation from the PF + * iavf_virtchnl_completion() after we get confirmation from the PF * driver that the rings have been stopped. * - * Also, we wait for state to transition to __I40EVF_DOWN before - * returning. State change occurs in i40evf_virtchnl_completion() after + * Also, we wait for state to transition to __IAVF_DOWN before + * returning. State change occurs in iavf_virtchnl_completion() after * VF resources are released (which occurs after PF driver processes and * responds to admin queue commands). 
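The open path above allocates Tx rings, then Rx rings, then the traffic IRQs, and unwinds through its err_* labels when any stage fails (the driver's free helpers skip rings that were never allocated, which is why a failed stage can jump to its own label). A sketch of the same pattern, in the slightly stricter variant where a failed stage jumps to freeing the previous one, with stub setup/teardown functions standing in for the driver's helpers:

/* Stubs standing in for the real setup/teardown helpers. */
static int setup_tx(void)      { return 0; }
static int setup_rx(void)      { return 0; }
static int request_irqs(void)  { return 0; }
static void free_irqs(void)    { }
static void free_rx(void)      { }
static void free_tx(void)      { }

int open_device(void)
{
	int err;

	err = setup_tx();
	if (err)
		goto out;
	err = setup_rx();
	if (err)
		goto err_free_tx;
	err = request_irqs();
	if (err)
		goto err_free_rx;
	return 0;

err_free_rx:
	free_irqs();        /* release anything that was partially requested */
	free_rx();
err_free_tx:
	free_tx();
out:
	return err;
}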
*/ status = wait_event_timeout(adapter->down_waitqueue, - adapter->state == __I40EVF_DOWN, + adapter->state == __IAVF_DOWN, msecs_to_jiffies(200)); if (!status) netdev_warn(netdev, "Device resources not yet released\n"); @@ -3065,64 +3062,65 @@ static int i40evf_close(struct net_device *netdev) } /** - * i40evf_change_mtu - Change the Maximum Transfer Unit + * iavf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ -static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) +static int iavf_change_mtu(struct net_device *netdev, int new_mtu) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); netdev->mtu = new_mtu; if (CLIENT_ENABLED(adapter)) { - i40evf_notify_client_l2_params(&adapter->vsi); - adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + iavf_notify_client_l2_params(&adapter->vsi); + adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + adapter->flags |= IAVF_FLAG_RESET_NEEDED; schedule_work(&adapter->reset_task); return 0; } /** - * i40e_set_features - set the netdev feature flags + * iavf_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting * Note: expects to be called while under rtnl_lock() **/ -static int i40evf_set_features(struct net_device *netdev, - netdev_features_t features) +static int iavf_set_features(struct net_device *netdev, + netdev_features_t features) { - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); - /* Don't allow changing VLAN_RX flag when VLAN is set for VF - * and return an error in this case + /* Don't allow changing VLAN_RX flag when adapter is not capable + * of VLAN offload */ - if (VLAN_ALLOWED(adapter)) { + if (!VLAN_ALLOWED(adapter)) { + if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) + return -EINVAL; + } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) adapter->aq_required |= - I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; else adapter->aq_required |= - I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { - return -EINVAL; + IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; } return 0; } /** - * i40evf_features_check - Validate encapsulated packet conforms to limits + * iavf_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ -static netdev_features_t i40evf_features_check(struct sk_buff *skb, - struct net_device *dev, - netdev_features_t features) +static netdev_features_t iavf_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) { size_t len; @@ -3173,16 +3171,16 @@ out_err: } /** - * i40evf_fix_features - fix up the netdev feature bits + * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device * @features: desired feature bits * * Returns fixed-up features bits **/ -static netdev_features_t i40evf_fix_features(struct net_device *netdev, - netdev_features_t features) +static netdev_features_t iavf_fix_features(struct net_device *netdev, + netdev_features_t features) { - struct i40evf_adapter *adapter = 
netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) features &= ~(NETIF_F_HW_VLAN_CTAG_TX | @@ -3192,37 +3190,37 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev, return features; } -static const struct net_device_ops i40evf_netdev_ops = { - .ndo_open = i40evf_open, - .ndo_stop = i40evf_close, - .ndo_start_xmit = i40evf_xmit_frame, - .ndo_set_rx_mode = i40evf_set_rx_mode, +static const struct net_device_ops iavf_netdev_ops = { + .ndo_open = iavf_open, + .ndo_stop = iavf_close, + .ndo_start_xmit = iavf_xmit_frame, + .ndo_set_rx_mode = iavf_set_rx_mode, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = i40evf_set_mac, - .ndo_change_mtu = i40evf_change_mtu, - .ndo_tx_timeout = i40evf_tx_timeout, - .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, - .ndo_features_check = i40evf_features_check, - .ndo_fix_features = i40evf_fix_features, - .ndo_set_features = i40evf_set_features, - .ndo_setup_tc = i40evf_setup_tc, + .ndo_set_mac_address = iavf_set_mac, + .ndo_change_mtu = iavf_change_mtu, + .ndo_tx_timeout = iavf_tx_timeout, + .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, + .ndo_features_check = iavf_features_check, + .ndo_fix_features = iavf_fix_features, + .ndo_set_features = iavf_set_features, + .ndo_setup_tc = iavf_setup_tc, }; /** - * i40evf_check_reset_complete - check that VF reset is complete + * iavf_check_reset_complete - check that VF reset is complete * @hw: pointer to hw struct * * Returns 0 if device is ready to use, or -EBUSY if it's in reset. **/ -static int i40evf_check_reset_complete(struct i40e_hw *hw) +static int iavf_check_reset_complete(struct iavf_hw *hw) { u32 rstat; int i; for (i = 0; i < 100; i++) { - rstat = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + rstat = rd32(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if ((rstat == VIRTCHNL_VFR_VFACTIVE) || (rstat == VIRTCHNL_VFR_COMPLETED)) return 0; @@ -3232,18 +3230,18 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) } /** - * i40evf_process_config - Process the config information we got from the PF + * iavf_process_config - Process the config information we got from the PF * @adapter: board private structure * * Verify that we have a valid config struct, and set up our netdev features * and our VSI struct. 
**/ -int i40evf_process_config(struct i40evf_adapter *adapter) +int iavf_process_config(struct iavf_adapter *adapter) { struct virtchnl_vf_resource *vfres = adapter->vf_res; int i, num_req_queues = adapter->num_req_queues; struct net_device *netdev = adapter->netdev; - struct i40e_vsi *vsi = &adapter->vsi; + struct iavf_vsi *vsi = &adapter->vsi; netdev_features_t hw_enc_features; netdev_features_t hw_features; @@ -3267,9 +3265,9 @@ int i40evf_process_config(struct i40evf_adapter *adapter) "Requested %d queues, but PF only gave us %d.\n", num_req_queues, adapter->vsi_res->num_queue_pairs); - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; - i40evf_schedule_reset(adapter); + iavf_schedule_reset(adapter); return -ENODEV; } adapter->num_req_queues = 0; @@ -3332,6 +3330,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->priv_flags |= IFF_UNICAST_FLT; + /* Do not turn on offloads when they are requested to be turned off. * TSO needs minimum 576 bytes to work correctly. */ @@ -3354,22 +3354,22 @@ int i40evf_process_config(struct i40evf_adapter *adapter) adapter->vsi.back = adapter; adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; + adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; vsi->netdev = adapter->netdev; vsi->qs_handle = adapter->vsi_res->qset_handle; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { adapter->rss_key_size = vfres->rss_key_size; adapter->rss_lut_size = vfres->rss_lut_size; } else { - adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE; - adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE; + adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; } return 0; } /** - * i40evf_init_task - worker thread to perform delayed initialization + * iavf_init_task - worker thread to perform delayed initialization * @work: pointer to work_struct containing our data * * This task completes the work that was begun in probe. Due to the nature @@ -3380,65 +3380,65 @@ int i40evf_process_config(struct i40evf_adapter *adapter) * communications with the PF driver and set up our netdev, the watchdog * takes over. 
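The init task described here is a small state machine driven by a self-rescheduling delayed work item: each invocation handles one stage of the PF handshake and either advances the state or arranges a retry. Stripped of the admin-queue plumbing, the control flow reduces to roughly the sketch below; the stage names mirror the driver's states, the PF interactions are stubbed out, and a real driver requeues its work item between steps instead of looping.

#include <stdbool.h>
#include <stdio.h>

enum init_state { STARTUP, VERSION_CHECK, GET_RESOURCES, INIT_SW, FAILED };

/* Stubs for the PF handshake; each corresponds to an admin-queue exchange. */
static bool send_api_version(void)   { return true; }
static bool api_version_ok(void)     { return true; }
static bool get_vf_config(void)      { return true; }

static enum init_state init_step(enum init_state s)
{
	switch (s) {
	case STARTUP:
		/* bring up the admin queue, then ask the PF for its API version */
		return send_api_version() ? VERSION_CHECK : FAILED;
	case VERSION_CHECK:
		/* a missing or unsupported reply means retrying the handshake */
		return api_version_ok() ? GET_RESOURCES : STARTUP;
	case GET_RESOURCES:
		/* fetch the VF resource description used to size queues and RSS */
		return get_vf_config() ? INIT_SW : FAILED;
	default:
		return s;
	}
}

int main(void)
{
	enum init_state s = STARTUP;

	while (s != INIT_SW && s != FAILED)
		s = init_step(s);   /* the driver reschedules itself here instead */

	printf("final state: %d\n", (int)s);
	return 0;
}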
**/ -static void i40evf_init_task(struct work_struct *work) +static void iavf_init_task(struct work_struct *work) { - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, + struct iavf_adapter *adapter = container_of(work, + struct iavf_adapter, init_task.work); struct net_device *netdev = adapter->netdev; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; int err, bufsz; switch (adapter->state) { - case __I40EVF_STARTUP: + case __IAVF_STARTUP: /* driver loaded, probe complete */ - adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - err = i40e_set_mac_type(hw); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; + err = iavf_set_mac_type(hw); if (err) { dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); goto err; } - err = i40evf_check_reset_complete(hw); + err = iavf_check_reset_complete(hw); if (err) { dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", err); goto err; } - hw->aq.num_arq_entries = I40EVF_AQ_LEN; - hw->aq.num_asq_entries = I40EVF_AQ_LEN; - hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; - hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; + hw->aq.num_arq_entries = IAVF_AQ_LEN; + hw->aq.num_asq_entries = IAVF_AQ_LEN; + hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; + hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; - err = i40evf_init_adminq(hw); + err = iavf_init_adminq(hw); if (err) { dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); goto err; } - err = i40evf_send_api_ver(adapter); + err = iavf_send_api_ver(adapter); if (err) { dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); goto err; } - adapter->state = __I40EVF_INIT_VERSION_CHECK; + adapter->state = __IAVF_INIT_VERSION_CHECK; goto restart; - case __I40EVF_INIT_VERSION_CHECK: - if (!i40evf_asq_done(hw)) { + case __IAVF_INIT_VERSION_CHECK: + if (!iavf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); - i40evf_shutdown_adminq(hw); - adapter->state = __I40EVF_STARTUP; + iavf_shutdown_adminq(hw); + adapter->state = __IAVF_STARTUP; goto err; } /* aq msg sent, awaiting reply */ - err = i40evf_verify_api_ver(adapter); + err = iavf_verify_api_ver(adapter); if (err) { if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - err = i40evf_send_api_ver(adapter); + err = iavf_send_api_ver(adapter); else dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", adapter->pf_version.major, @@ -3447,34 +3447,34 @@ static void i40evf_init_task(struct work_struct *work) VIRTCHNL_VERSION_MINOR); goto err; } - err = i40evf_send_vf_config_msg(adapter); + err = iavf_send_vf_config_msg(adapter); if (err) { dev_err(&pdev->dev, "Unable to send config request (%d)\n", err); goto err; } - adapter->state = __I40EVF_INIT_GET_RESOURCES; + adapter->state = __IAVF_INIT_GET_RESOURCES; goto restart; - case __I40EVF_INIT_GET_RESOURCES: + case __IAVF_INIT_GET_RESOURCES: /* aq msg sent, awaiting reply */ if (!adapter->vf_res) { bufsz = sizeof(struct virtchnl_vf_resource) + - (I40E_MAX_VF_VSI * + (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); if (!adapter->vf_res) goto err; } - err = i40evf_get_vf_config(adapter); + err = iavf_get_vf_config(adapter); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { - err = i40evf_send_vf_config_msg(adapter); + err = iavf_send_vf_config_msg(adapter); goto err; } else if (err == 
I40E_ERR_PARAM) { /* We only get ERR_PARAM if the device is in a very bad * state or if we've been disabled for previous bad * behavior. Either way, we're done now. */ - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); return; } @@ -3483,25 +3483,25 @@ static void i40evf_init_task(struct work_struct *work) err); goto err_alloc; } - adapter->state = __I40EVF_INIT_SW; + adapter->state = __IAVF_INIT_SW; break; default: goto err_alloc; } - if (i40evf_process_config(adapter)) + if (iavf_process_config(adapter)) goto err_alloc; adapter->current_op = VIRTCHNL_OP_UNKNOWN; - adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; + adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; - netdev->netdev_ops = &i40evf_netdev_ops; - i40evf_set_ethtool_ops(netdev); + netdev->netdev_ops = &iavf_netdev_ops; + iavf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; /* MTU range: 68 - 9710 */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; + netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", @@ -3509,25 +3509,25 @@ static void i40evf_init_task(struct work_struct *work) eth_hw_addr_random(netdev); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { - adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF; + adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF; ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } - timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0); + timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0); mod_timer(&adapter->watchdog_timer, jiffies + 1); - adapter->tx_desc_count = I40EVF_DEFAULT_TXD; - adapter->rx_desc_count = I40EVF_DEFAULT_RXD; - err = i40evf_init_interrupt_scheme(adapter); + adapter->tx_desc_count = IAVF_DEFAULT_TXD; + adapter->rx_desc_count = IAVF_DEFAULT_RXD; + err = iavf_init_interrupt_scheme(adapter); if (err) goto err_sw_init; - i40evf_map_rings_to_vectors(adapter); + iavf_map_rings_to_vectors(adapter); if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) - adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; + adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; - err = i40evf_request_misc_irq(adapter); + err = iavf_request_misc_irq(adapter); if (err) goto err_sw_init; @@ -3544,7 +3544,7 @@ static void i40evf_init_task(struct work_struct *work) netif_tx_stop_all_queues(netdev); if (CLIENT_ALLOWED(adapter)) { - err = i40evf_lan_add_device(adapter); + err = iavf_lan_add_device(adapter); if (err) dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", err); @@ -3554,9 +3554,9 @@ static void i40evf_init_task(struct work_struct *work) if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - adapter->state = __I40EVF_DOWN; - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); - i40evf_misc_irq_enable(adapter); + adapter->state = __IAVF_DOWN; + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); + iavf_misc_irq_enable(adapter); wake_up(&adapter->down_waitqueue); adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); @@ -3565,31 +3565,31 @@ static void i40evf_init_task(struct work_struct *work) goto err_mem; if (RSS_AQ(adapter)) { - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } else { - 
i40evf_init_rss(adapter); + iavf_init_rss(adapter); } return; restart: schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); return; err_mem: - i40evf_free_rss(adapter); + iavf_free_rss(adapter); err_register: - i40evf_free_misc_irq(adapter); + iavf_free_misc_irq(adapter); err_sw_init: - i40evf_reset_interrupt_capability(adapter); + iavf_reset_interrupt_capability(adapter); err_alloc: kfree(adapter->vf_res); adapter->vf_res = NULL; err: /* Things went into the weeds, so try again later */ - if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); - adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - i40evf_shutdown_adminq(hw); - adapter->state = __I40EVF_STARTUP; + adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; + iavf_shutdown_adminq(hw); + adapter->state = __IAVF_STARTUP; schedule_delayed_work(&adapter->init_task, HZ * 5); return; } @@ -3597,21 +3597,21 @@ err: } /** - * i40evf_shutdown - Shutdown the device in preparation for a reboot + * iavf_shutdown - Shutdown the device in preparation for a reboot * @pdev: pci device structure **/ -static void i40evf_shutdown(struct pci_dev *pdev) +static void iavf_shutdown(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (netif_running(netdev)) - i40evf_close(netdev); + iavf_close(netdev); /* Prevent the watchdog from running. */ - adapter->state = __I40EVF_REMOVE; + adapter->state = __IAVF_REMOVE; adapter->aq_required = 0; #ifdef CONFIG_PM @@ -3622,21 +3622,21 @@ static void i40evf_shutdown(struct pci_dev *pdev) } /** - * i40evf_probe - Device Initialization Routine + * iavf_probe - Device Initialization Routine * @pdev: PCI device information struct - * @ent: entry in i40evf_pci_tbl + * @ent: entry in iavf_pci_tbl * * Returns 0 on success, negative on failure * - * i40evf_probe initializes an adapter identified by a pci_dev structure. + * iavf_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
**/ -static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; - struct i40evf_adapter *adapter = NULL; - struct i40e_hw *hw = NULL; + struct iavf_adapter *adapter = NULL; + struct iavf_hw *hw = NULL; int err; err = pci_enable_device(pdev); @@ -3653,7 +3653,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - err = pci_request_regions(pdev, i40evf_driver_name); + err = pci_request_regions(pdev, iavf_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); @@ -3664,8 +3664,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), - I40EVF_MAX_REQ_QUEUES); + netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), + IAVF_MAX_REQ_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; @@ -3683,7 +3683,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __I40EVF_STARTUP; + adapter->state = __IAVF_STARTUP; /* Call save state here because it relies on the adapter struct. */ pci_save_state(pdev); @@ -3716,11 +3716,11 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&adapter->vlan_filter_list); INIT_LIST_HEAD(&adapter->cloud_filter_list); - INIT_WORK(&adapter->reset_task, i40evf_reset_task); - INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); - INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); - INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task); - INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); + INIT_WORK(&adapter->reset_task, iavf_reset_task); + INIT_WORK(&adapter->adminq_task, iavf_adminq_task); + INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task); + INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); + INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); @@ -3741,33 +3741,33 @@ err_dma: #ifdef CONFIG_PM /** - * i40evf_suspend - Power management suspend routine + * iavf_suspend - Power management suspend routine * @pdev: PCI device information struct * @state: unused * * Called when the system (VM) is entering sleep/suspend. 
**/ -static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) +static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); + struct iavf_adapter *adapter = netdev_priv(netdev); int retval = 0; netif_device_detach(netdev); - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) usleep_range(500, 1000); if (netif_running(netdev)) { rtnl_lock(); - i40evf_down(adapter); + iavf_down(adapter); rtnl_unlock(); } - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); retval = pci_save_state(pdev); if (retval) @@ -3779,14 +3779,14 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) } /** - * i40evf_resume - Power management resume routine + * iavf_resume - Power management resume routine * @pdev: PCI device information struct * * Called when the system (VM) is resumed from sleep/suspend. **/ -static int i40evf_resume(struct pci_dev *pdev) +static int iavf_resume(struct pci_dev *pdev) { - struct i40evf_adapter *adapter = pci_get_drvdata(pdev); + struct iavf_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err; @@ -3805,13 +3805,13 @@ static int i40evf_resume(struct pci_dev *pdev) pci_set_master(pdev); rtnl_lock(); - err = i40evf_set_interrupt_capability(adapter); + err = iavf_set_interrupt_capability(adapter); if (err) { rtnl_unlock(); dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); return err; } - err = i40evf_request_misc_irq(adapter); + err = iavf_request_misc_irq(adapter); rtnl_unlock(); if (err) { dev_err(&pdev->dev, "Cannot get interrupt vector.\n"); @@ -3827,25 +3827,25 @@ static int i40evf_resume(struct pci_dev *pdev) #endif /* CONFIG_PM */ /** - * i40evf_remove - Device Removal Routine + * iavf_remove - Device Removal Routine * @pdev: PCI device information struct * - * i40evf_remove is called by the PCI subsystem to alert the driver + * iavf_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. 
**/ -static void i40evf_remove(struct pci_dev *pdev) +static void iavf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40evf_vlan_filter *vlf, *vlftmp; - struct i40evf_mac_filter *f, *ftmp; - struct i40evf_cloud_filter *cf, *cftmp; - struct i40e_hw *hw = &adapter->hw; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_mac_filter *f, *ftmp; + struct iavf_cloud_filter *cf, *cftmp; + struct iavf_hw *hw = &adapter->hw; int err; /* Indicate we are in remove and not to run reset_task */ - set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section); + set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); cancel_delayed_work_sync(&adapter->init_task); cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->client_task); @@ -3854,37 +3854,39 @@ static void i40evf_remove(struct pci_dev *pdev) adapter->netdev_registered = false; } if (CLIENT_ALLOWED(adapter)) { - err = i40evf_lan_del_device(adapter); + err = iavf_lan_del_device(adapter); if (err) dev_warn(&pdev->dev, "Failed to delete client device: %d\n", err); } /* Shut down all the garbage mashers on the detention level */ - adapter->state = __I40EVF_REMOVE; + adapter->state = __IAVF_REMOVE; adapter->aq_required = 0; - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - i40evf_request_reset(adapter); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. */ - if (!i40evf_asq_done(hw)) { - i40evf_request_reset(adapter); + if (!iavf_asq_done(hw)) { + iavf_request_reset(adapter); msleep(50); } - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - i40evf_misc_irq_disable(adapter); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_q_vectors(adapter); + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_misc_irq_disable(adapter); + iavf_free_misc_irq(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_q_vectors(adapter); if (adapter->watchdog_timer.function) del_timer_sync(&adapter->watchdog_timer); - i40evf_free_rss(adapter); + cancel_work_sync(&adapter->adminq_task); + + iavf_free_rss(adapter); if (hw->aq.asq.count) - i40evf_shutdown_adminq(hw); + iavf_shutdown_adminq(hw); /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); @@ -3892,9 +3894,9 @@ static void i40evf_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); pci_release_regions(pdev); - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - i40evf_free_queues(adapter); + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + iavf_free_queues(adapter); kfree(adapter->vf_res); spin_lock_bh(&adapter->mac_vlan_list_lock); /* If we got removed before an up/down sequence, we've got a filter @@ -3926,57 +3928,57 @@ static void i40evf_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static struct pci_driver i40evf_driver = { - .name = i40evf_driver_name, - .id_table = i40evf_pci_tbl, - .probe = i40evf_probe, - .remove = i40evf_remove, +static struct pci_driver iavf_driver = { + .name = iavf_driver_name, + .id_table = iavf_pci_tbl, + .probe = iavf_probe, + .remove = iavf_remove, #ifdef CONFIG_PM - .suspend = i40evf_suspend, - .resume = i40evf_resume, + .suspend = iavf_suspend, + .resume = iavf_resume, #endif - .shutdown = i40evf_shutdown, 
+ .shutdown = iavf_shutdown, }; /** - * i40e_init_module - Driver Registration Routine + * iavf_init_module - Driver Registration Routine * - * i40e_init_module is the first routine called when the driver is + * iavf_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ -static int __init i40evf_init_module(void) +static int __init iavf_init_module(void) { int ret; - pr_info("i40evf: %s - version %s\n", i40evf_driver_string, - i40evf_driver_version); + pr_info("iavf: %s - version %s\n", iavf_driver_string, + iavf_driver_version); - pr_info("%s\n", i40evf_copyright); + pr_info("%s\n", iavf_copyright); - i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - i40evf_driver_name); - if (!i40evf_wq) { - pr_err("%s: Failed to create workqueue\n", i40evf_driver_name); + iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, + iavf_driver_name); + if (!iavf_wq) { + pr_err("%s: Failed to create workqueue\n", iavf_driver_name); return -ENOMEM; } - ret = pci_register_driver(&i40evf_driver); + ret = pci_register_driver(&iavf_driver); return ret; } -module_init(i40evf_init_module); +module_init(iavf_init_module); /** - * i40e_exit_module - Driver Exit Cleanup Routine + * iavf_exit_module - Driver Exit Cleanup Routine * - * i40e_exit_module is called just before the driver is removed + * iavf_exit_module is called just before the driver is removed * from memory. **/ -static void __exit i40evf_exit_module(void) +static void __exit iavf_exit_module(void) { - pci_unregister_driver(&i40evf_driver); - destroy_workqueue(i40evf_wq); + pci_unregister_driver(&iavf_driver); + destroy_workqueue(iavf_wq); } -module_exit(i40evf_exit_module); +module_exit(iavf_exit_module); -/* i40evf_main.c */ +/* iavf_main.c */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/iavf/iavf_osdep.h index 3ddddb46455b..e6e0b0328706 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/iavf/iavf_osdep.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#ifndef _I40E_OSDEP_H_ -#define _I40E_OSDEP_H_ +#ifndef _IAVF_OSDEP_H_ +#define _IAVF_OSDEP_H_ #include <linux/types.h> #include <linux/if_ether.h> @@ -24,29 +24,29 @@ #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) #define rd64(a, reg) readq((a)->hw_addr + (reg)) -#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT) +#define iavf_flush(a) readl((a)->hw_addr + IAVF_VFGEN_RSTAT) /* memory allocation tracking */ -struct i40e_dma_mem { +struct iavf_dma_mem { void *va; dma_addr_t pa; u32 size; }; -#define i40e_allocate_dma_mem(h, m, unused, s, a) \ - i40evf_allocate_dma_mem_d(h, m, s, a) -#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m) +#define iavf_allocate_dma_mem(h, m, unused, s, a) \ + iavf_allocate_dma_mem_d(h, m, s, a) +#define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m) -struct i40e_virt_mem { +struct iavf_virt_mem { void *va; u32 size; }; -#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) -#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) +#define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s) +#define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m) -#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__) -extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) +#define iavf_debug(h, m, s, ...) 
iavf_debug_d(h, m, s, ##__VA_ARGS__) +extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) __attribute__ ((format(gnu_printf, 3, 4))); -typedef enum i40e_status_code i40e_status; -#endif /* _I40E_OSDEP_H_ */ +typedef enum iavf_status_code iavf_status; +#endif /* _IAVF_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h new file mode 100644 index 000000000000..d6685103af39 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _IAVF_PROTOTYPE_H_ +#define _IAVF_PROTOTYPE_H_ + +#include "iavf_type.h" +#include "iavf_alloc.h" +#include <linux/avf/virtchnl.h> + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. + */ + +/* adminq functions */ +iavf_status iavf_init_adminq(struct iavf_hw *hw); +iavf_status iavf_shutdown_adminq(struct iavf_hw *hw); +void i40e_adminq_init_ring_data(struct iavf_hw *hw); +iavf_status iavf_clean_arq_element(struct iavf_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +bool iavf_asq_done(struct iavf_hw *hw); + +/* debug function for adminq */ +void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, + void *desc, void *buffer, u16 buf_len); + +void i40e_idle_aq(struct iavf_hw *hw); +void iavf_resume_aq(struct iavf_hw *hw); +bool iavf_check_asq_alive(struct iavf_hw *hw); +iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading); +const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err); +const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err); + +iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); + +iavf_status iavf_set_mac_type(struct iavf_hw *hw); + +extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[]; + +static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return iavf_ptype_lookup[ptype]; +} + +void iavf_vf_parse_hw_config(struct iavf_hw *hw, + struct virtchnl_vf_resource *msg); +iavf_status iavf_vf_reset(struct iavf_hw *hw); +iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, + enum virtchnl_ops v_opcode, + iavf_status v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +#endif /* _IAVF_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h new file mode 100644 index 000000000000..bf793332fc9d --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_register.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ + +#ifndef _IAVF_REGISTER_H_ +#define _IAVF_REGISTER_H_ + +#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define IAVF_VF_ARQH1_ARQH_SHIFT 0 +#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT) +#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQVFE_SHIFT) +#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT) +#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT) +#define IAVF_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT) +#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQVFE_SHIFT) +#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT) +#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT) +#define IAVF_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT) +#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ +#define IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT) +#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ +#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(0x1, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */ +#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) 
/* _i=0...15 */ /* Reset: PFR */ +#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define IAVF_VFQF_HKEY_MAX_INDEX 12 +#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define IAVF_VFQF_HLUT_MAX_INDEX 15 +#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#endif /* _IAVF_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h index 77be0702d07c..46742fab7b8c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_status.h +++ b/drivers/net/ethernet/intel/iavf/iavf_status.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#ifndef _I40E_STATUS_H_ -#define _I40E_STATUS_H_ +#ifndef _IAVF_STATUS_H_ +#define _IAVF_STATUS_H_ /* Error Codes */ -enum i40e_status_code { +enum iavf_status_code { I40E_SUCCESS = 0, I40E_ERR_NVM = -1, I40E_ERR_NVM_CHECKSUM = -2, @@ -75,4 +75,4 @@ enum i40e_status_code { I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, }; -#endif /* _I40E_STATUS_H_ */ +#endif /* _IAVF_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h index d7a4e68820a8..1474f5539751 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h +++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h @@ -3,16 +3,16 @@ /* Modeled on trace-events-sample.h */ -/* The trace subsystem name for i40evf will be "i40evf". +/* The trace subsystem name for iavf will be "iavf". * - * This file is named i40e_trace.h. + * This file is named iavf_trace.h. * * Since this include file's name is different from the trace * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end * of this file. */ #undef TRACE_SYSTEM -#define TRACE_SYSTEM i40evf +#define TRACE_SYSTEM iavf /* See trace-events-sample.h for a detailed description of why this * guard clause is different from most normal include files. @@ -23,14 +23,14 @@ #include <linux/tracepoint.h> /** - * i40e_trace() macro enables shared code to refer to trace points + * iavf_trace() macro enables shared code to refer to trace points * like: * - * trace_i40e{,vf}_example(args...) + * trace_iavf{,vf}_example(args...) * * ... as: * - * i40e_trace(example, args...) + * iavf_trace(example, args...) * * ... to resolve to the PF or VF version of the tracepoint without * ifdefs, and to allow tracepoints to be disabled entirely at build @@ -39,29 +39,29 @@ * Trace point should always be referred to in the driver via this * macro. * - * Similarly, i40e_trace_enabled(trace_name) wraps references to - * trace_i40e{,vf}_<trace_name>_enabled() functions. + * Similarly, iavf_trace_enabled(trace_name) wraps references to + * trace_iavf{,vf}_<trace_name>_enabled() functions. */ -#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name) -#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name) +#define _IAVF_TRACE_NAME(trace_name) (trace_ ## iavf ## _ ## trace_name) +#define IAVF_TRACE_NAME(trace_name) _IAVF_TRACE_NAME(trace_name) -#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args) +#define iavf_trace(trace_name, args...) 
IAVF_TRACE_NAME(trace_name)(args) -#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)() +#define iavf_trace_enabled(trace_name) IAVF_TRACE_NAME(trace_name##_enabled)() /* Events common to PF and VF. Corresponding versions will be defined - * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace() + * for both, named trace_iavf_* and trace_iavf_*. The iavf_trace() * macro above will select the right trace point name for the driver * being built from shared code. */ /* Events related to a vsi & ring */ DECLARE_EVENT_CLASS( - i40evf_tx_template, + iavf_tx_template, - TP_PROTO(struct i40e_ring *ring, - struct i40e_tx_desc *desc, - struct i40e_tx_buffer *buf), + TP_PROTO(struct iavf_ring *ring, + struct iavf_tx_desc *desc, + struct iavf_tx_buffer *buf), TP_ARGS(ring, desc, buf), @@ -93,26 +93,26 @@ DECLARE_EVENT_CLASS( ); DEFINE_EVENT( - i40evf_tx_template, i40evf_clean_tx_irq, - TP_PROTO(struct i40e_ring *ring, - struct i40e_tx_desc *desc, - struct i40e_tx_buffer *buf), + iavf_tx_template, iavf_clean_tx_irq, + TP_PROTO(struct iavf_ring *ring, + struct iavf_tx_desc *desc, + struct iavf_tx_buffer *buf), TP_ARGS(ring, desc, buf)); DEFINE_EVENT( - i40evf_tx_template, i40evf_clean_tx_irq_unmap, - TP_PROTO(struct i40e_ring *ring, - struct i40e_tx_desc *desc, - struct i40e_tx_buffer *buf), + iavf_tx_template, iavf_clean_tx_irq_unmap, + TP_PROTO(struct iavf_ring *ring, + struct iavf_tx_desc *desc, + struct iavf_tx_buffer *buf), TP_ARGS(ring, desc, buf)); DECLARE_EVENT_CLASS( - i40evf_rx_template, + iavf_rx_template, - TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, + TP_PROTO(struct iavf_ring *ring, + union iavf_32byte_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb), @@ -138,26 +138,26 @@ DECLARE_EVENT_CLASS( ); DEFINE_EVENT( - i40evf_rx_template, i40evf_clean_rx_irq, - TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, + iavf_rx_template, iavf_clean_rx_irq, + TP_PROTO(struct iavf_ring *ring, + union iavf_32byte_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); DEFINE_EVENT( - i40evf_rx_template, i40evf_clean_rx_irq_rx, - TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, + iavf_rx_template, iavf_clean_rx_irq_rx, + TP_PROTO(struct iavf_ring *ring, + union iavf_32byte_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); DECLARE_EVENT_CLASS( - i40evf_xmit_template, + iavf_xmit_template, TP_PROTO(struct sk_buff *skb, - struct i40e_ring *ring), + struct iavf_ring *ring), TP_ARGS(skb, ring), @@ -180,23 +180,23 @@ DECLARE_EVENT_CLASS( ); DEFINE_EVENT( - i40evf_xmit_template, i40evf_xmit_frame_ring, + iavf_xmit_template, iavf_xmit_frame_ring, TP_PROTO(struct sk_buff *skb, - struct i40e_ring *ring), + struct iavf_ring *ring), TP_ARGS(skb, ring)); DEFINE_EVENT( - i40evf_xmit_template, i40evf_xmit_frame_ring_drop, + iavf_xmit_template, iavf_xmit_frame_ring_drop, TP_PROTO(struct sk_buff *skb, - struct i40e_ring *ring), + struct iavf_ring *ring), TP_ARGS(skb, ring)); /* Events unique to the VF. */ -#endif /* _I40E_TRACE_H_ */ -/* This must be outside ifdef _I40E_TRACE_H */ +#endif /* _IAVF_TRACE_H_ */ +/* This must be outside ifdef _IAVF_TRACE_H */ /* This trace include file is not located in the .../include/trace * with the kernel tracepoint definitions, because we're a loadable @@ -205,5 +205,5 @@ DEFINE_EVENT( #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . 
#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE i40e_trace +#define TRACE_INCLUDE_FILE iavf_trace #include <trace/define_trace.h> diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index a9730711e257..edc349f49748 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -4,32 +4,32 @@ #include <linux/prefetch.h> #include <net/busy_poll.h> -#include "i40evf.h" -#include "i40e_trace.h" -#include "i40e_prototype.h" +#include "iavf.h" +#include "iavf_trace.h" +#include "iavf_prototype.h" static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, u32 td_tag) { - return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | - ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | - ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | - ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | - ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); + return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT)); } -#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) +#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS) /** - * i40e_unmap_and_free_tx_resource - Release a Tx buffer + * iavf_unmap_and_free_tx_resource - Release a Tx buffer * @ring: the ring that owns the buffer * @tx_buffer: the buffer to free **/ -static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, - struct i40e_tx_buffer *tx_buffer) +static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring, + struct iavf_tx_buffer *tx_buffer) { if (tx_buffer->skb) { - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB) kfree(tx_buffer->raw_buf); else dev_kfree_skb_any(tx_buffer->skb); @@ -52,10 +52,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, } /** - * i40evf_clean_tx_ring - Free any empty Tx buffers + * iavf_clean_tx_ring - Free any empty Tx buffers * @tx_ring: ring to be cleaned **/ -void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) +void iavf_clean_tx_ring(struct iavf_ring *tx_ring) { unsigned long bi_size; u16 i; @@ -66,9 +66,9 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) - i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); + iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); - bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; memset(tx_ring->tx_bi, 0, bi_size); /* Zero out the descriptor ring */ @@ -85,14 +85,14 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) } /** - * i40evf_free_tx_resources - Free Tx resources per queue + * iavf_free_tx_resources - Free Tx resources per queue * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ -void i40evf_free_tx_resources(struct i40e_ring *tx_ring) +void iavf_free_tx_resources(struct iavf_ring *tx_ring) { - i40evf_clean_tx_ring(tx_ring); + iavf_clean_tx_ring(tx_ring); kfree(tx_ring->tx_bi); tx_ring->tx_bi = NULL; @@ -104,14 +104,14 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) } /** - * i40evf_get_tx_pending - how many Tx descriptors not processed + * iavf_get_tx_pending - how many Tx descriptors not processed * @ring: the ring of descriptors * @in_sw: is tx_pending being checked in SW or HW * * 
Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) +u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) { u32 head, tail; @@ -126,15 +126,15 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) } /** - * i40evf_detect_recover_hung - Function to detect and recover hung_queues + * iavf_detect_recover_hung - Function to detect and recover hung_queues * @vsi: pointer to vsi struct with tx queues * * VSI has netdev and netdev has TX queues. This function is to check each of * those TX queues if they are hung, trigger recovery by issuing SW interrupt. **/ -void i40evf_detect_recover_hung(struct i40e_vsi *vsi) +void iavf_detect_recover_hung(struct iavf_vsi *vsi) { - struct i40e_ring *tx_ring = NULL; + struct iavf_ring *tx_ring = NULL; struct net_device *netdev; unsigned int i; int packets; @@ -142,7 +142,7 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi) if (!vsi) return; - if (test_bit(__I40E_VSI_DOWN, vsi->state)) + if (test_bit(__IAVF_VSI_DOWN, vsi->state)) return; netdev = vsi->netdev; @@ -164,16 +164,16 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi) */ packets = tx_ring->stats.packets & INT_MAX; if (tx_ring->tx_stats.prev_pkt_ctr == packets) { - i40evf_force_wb(vsi, tx_ring->q_vector); + iavf_force_wb(vsi, tx_ring->q_vector); continue; } /* Memory barrier between read of packet count and call - * to i40evf_get_tx_pending() + * to iavf_get_tx_pending() */ smp_rmb(); tx_ring->tx_stats.prev_pkt_ctr = - i40evf_get_tx_pending(tx_ring, true) ? packets : -1; + iavf_get_tx_pending(tx_ring, true) ? packets : -1; } } } @@ -181,28 +181,28 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi) #define WB_STRIDE 4 /** - * i40e_clean_tx_irq - Reclaim resources after transmit completes + * iavf_clean_tx_irq - Reclaim resources after transmit completes * @vsi: the VSI we care about * @tx_ring: Tx ring to clean * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ -static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, - struct i40e_ring *tx_ring, int napi_budget) +static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, + struct iavf_ring *tx_ring, int napi_budget) { u16 i = tx_ring->next_to_clean; - struct i40e_tx_buffer *tx_buf; - struct i40e_tx_desc *tx_desc; + struct iavf_tx_buffer *tx_buf; + struct iavf_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = vsi->work_limit; tx_buf = &tx_ring->tx_bi[i]; - tx_desc = I40E_TX_DESC(tx_ring, i); + tx_desc = IAVF_TX_DESC(tx_ring, i); i -= tx_ring->count; do { - struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) @@ -211,10 +211,10 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* prevent any other reads prior to eop_desc */ smp_rmb(); - i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); + iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & - cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE))) break; /* clear next_to_watch to prevent false hangs */ @@ -239,7 +239,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* unmap remaining buffers */ while (tx_desc != eop_desc) { - i40e_trace(clean_tx_irq_unmap, + iavf_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf); tx_buf++; @@ -248,7 +248,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; - tx_desc = I40E_TX_DESC(tx_ring, 0); + tx_desc = IAVF_TX_DESC(tx_ring, 0); } /* unmap any remaining paged data */ @@ -268,7 +268,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; - tx_desc = I40E_TX_DESC(tx_ring, 0); + tx_desc = IAVF_TX_DESC(tx_ring, 0); } prefetch(tx_desc); @@ -286,18 +286,18 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; - if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) { /* check to see if there are < 4 descriptors * waiting to be written back, then kick the hardware to force * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ - unsigned int j = i40evf_get_tx_pending(tx_ring, false); + unsigned int j = iavf_get_tx_pending(tx_ring, false); if (budget && ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_VSI_DOWN, vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + !test_bit(__IAVF_VSI_DOWN, vsi->state) && + (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -307,14 +307,14 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && - (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
*/ smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_VSI_DOWN, vsi->state)) { + !test_bit(__IAVF_VSI_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -325,75 +325,75 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, } /** - * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled + * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about * @q_vector: the vector on which to enable writeback * **/ -static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, - struct i40e_q_vector *q_vector) +static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi, + struct iavf_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; u32 val; - if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) + if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR)) return; if (q_vector->arm_wb_state) return; - val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ + val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val); + IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val); q_vector->arm_wb_state = true; } /** - * i40evf_force_wb - Issue SW Interrupt so HW does a wb + * iavf_force_wb - Issue SW Interrupt so HW does a wb * @vsi: the VSI we care about * @q_vector: the vector on which to force writeback * **/ -void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector) { - u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ - I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK + u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ + IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | + IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK /* allow 00 to be written to the index */; wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), + IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val); } -static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, - struct i40e_ring_container *rc) +static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector, + struct iavf_ring_container *rc) { return &q_vector->rx == rc; } -static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) +static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector) { unsigned int divisor; switch (q_vector->adapter->link_speed) { case I40E_LINK_SPEED_40GB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; + divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024; break; case I40E_LINK_SPEED_25GB: case I40E_LINK_SPEED_20GB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; + divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512; break; default: case I40E_LINK_SPEED_10GB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; + divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256; break; case I40E_LINK_SPEED_1GB: case I40E_LINK_SPEED_100MB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; + divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32; break; } @@ -401,7 +401,7 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) } /** - * i40e_update_itr - update the dynamic ITR value based on statistics + * iavf_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information * @rc: structure 
containing ring performance data * @@ -413,8 +413,8 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) * on testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static void i40e_update_itr(struct i40e_q_vector *q_vector, - struct i40e_ring_container *rc) +static void iavf_update_itr(struct iavf_q_vector *q_vector, + struct iavf_ring_container *rc) { unsigned int avg_wire_size, packets, bytes, itr; unsigned long next_update = jiffies; @@ -428,9 +428,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, /* For Rx we want to push the delay up and default to low latency. * for Tx we want to pull the delay down and default to high latency. */ - itr = i40e_container_is_rx(q_vector, rc) ? - I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : - I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; + itr = iavf_container_is_rx(q_vector, rc) ? + IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY : + IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY; /* If we didn't update within up to 1 - 2 jiffies we can assume * that either packets are coming in so slow there hasn't been @@ -454,15 +454,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, packets = rc->total_packets; bytes = rc->total_bytes; - if (i40e_container_is_rx(q_vector, rc)) { + if (iavf_container_is_rx(q_vector, rc)) { /* If Rx there are 1 to 4 packets and bytes are less than * 9000 assume insufficient data to use bulk rate limiting * approach unless Tx is already in bulk rate limiting. We * are likely latency driven. */ if (packets && packets < 4 && bytes < 9000 && - (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { - itr = I40E_ITR_ADAPTIVE_LATENCY; + (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) { + itr = IAVF_ITR_ADAPTIVE_LATENCY; goto adjust_by_size; } } else if (packets < 4) { @@ -471,15 +471,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so * that the Rx can relax. */ - if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && - (q_vector->rx.target_itr & I40E_ITR_MASK) == - I40E_ITR_ADAPTIVE_MAX_USECS) + if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS && + (q_vector->rx.target_itr & IAVF_ITR_MASK) == + IAVF_ITR_ADAPTIVE_MAX_USECS) goto clear_counts; } else if (packets > 32) { /* If we have processed over 32 packets in a single interrupt * for Tx assume we need to switch over to "bulk" mode. */ - rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; + rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY; } /* We have no packets to actually measure against. This means @@ -491,17 +491,17 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, * fixed amount. */ if (packets < 56) { - itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; - if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { - itr &= I40E_ITR_ADAPTIVE_LATENCY; - itr += I40E_ITR_ADAPTIVE_MAX_USECS; + itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC; + if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { + itr &= IAVF_ITR_ADAPTIVE_LATENCY; + itr += IAVF_ITR_ADAPTIVE_MAX_USECS; } goto clear_counts; } if (packets <= 256) { itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); - itr &= I40E_ITR_MASK; + itr &= IAVF_ITR_MASK; /* Between 56 and 112 is our "goldilocks" zone where we are * working out "just right". Just report that our current @@ -516,9 +516,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, * in half per interrupt. 
*/ itr /= 2; - itr &= I40E_ITR_MASK; - if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) - itr = I40E_ITR_ADAPTIVE_MIN_USECS; + itr &= IAVF_ITR_MASK; + if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS) + itr = IAVF_ITR_ADAPTIVE_MIN_USECS; goto clear_counts; } @@ -529,7 +529,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector, * though for smaller packet sizes there isn't much we can do as * NAPI polling will likely be kicking in sooner rather than later. */ - itr = I40E_ITR_ADAPTIVE_BULK; + itr = IAVF_ITR_ADAPTIVE_BULK; adjust_by_size: /* If packet counts are 256 or greater we can assume we have a gross @@ -577,7 +577,7 @@ adjust_by_size: /* If we are in low latency mode halve our delay which doubles the * rate to somewhere between 100K to 16K ints/sec */ - if (itr & I40E_ITR_ADAPTIVE_LATENCY) + if (itr & IAVF_ITR_ADAPTIVE_LATENCY) avg_wire_size /= 2; /* Resultant value is 256 times larger than it needs to be. This @@ -587,12 +587,12 @@ adjust_by_size: * Use addition as we have already recorded the new latency flag * for the ITR value. */ - itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * - I40E_ITR_ADAPTIVE_MIN_INC; + itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) * + IAVF_ITR_ADAPTIVE_MIN_INC; - if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { - itr &= I40E_ITR_ADAPTIVE_LATENCY; - itr += I40E_ITR_ADAPTIVE_MAX_USECS; + if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { + itr &= IAVF_ITR_ADAPTIVE_LATENCY; + itr += IAVF_ITR_ADAPTIVE_MAX_USECS; } clear_counts: @@ -607,12 +607,12 @@ clear_counts: } /** - * i40evf_setup_tx_descriptors - Allocate the Tx descriptors + * iavf_setup_tx_descriptors - Allocate the Tx descriptors * @tx_ring: the tx ring to set up * * Return 0 on success, negative on error **/ -int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) +int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring) { struct device *dev = tx_ring->dev; int bi_size; @@ -622,13 +622,13 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) /* warn if we are about to overwrite the pointer */ WARN_ON(tx_ring->tx_bi); - bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); if (!tx_ring->tx_bi) goto err; /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); @@ -650,10 +650,10 @@ err: } /** - * i40evf_clean_rx_ring - Free Rx buffers + * iavf_clean_rx_ring - Free Rx buffers * @rx_ring: ring to be cleaned **/ -void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) +void iavf_clean_rx_ring(struct iavf_ring *rx_ring) { unsigned long bi_size; u16 i; @@ -669,7 +669,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; if (!rx_bi->page) continue; @@ -685,9 +685,9 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) /* free resources associated with mapping */ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, - i40e_rx_pg_size(rx_ring), + iavf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, - I40E_RX_DMA_ATTR); + IAVF_RX_DMA_ATTR); __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); @@ -695,7 +695,7 @@ void i40evf_clean_rx_ring(struct i40e_ring 
*rx_ring) rx_bi->page_offset = 0; } - bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; memset(rx_ring->rx_bi, 0, bi_size); /* Zero out the descriptor ring */ @@ -707,14 +707,14 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) } /** - * i40evf_free_rx_resources - Free Rx resources + * iavf_free_rx_resources - Free Rx resources * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ -void i40evf_free_rx_resources(struct i40e_ring *rx_ring) +void iavf_free_rx_resources(struct iavf_ring *rx_ring) { - i40evf_clean_rx_ring(rx_ring); + iavf_clean_rx_ring(rx_ring); kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; @@ -726,19 +726,19 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } /** - * i40evf_setup_rx_descriptors - Allocate Rx descriptors + * iavf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ -int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) +int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) { struct device *dev = rx_ring->dev; int bi_size; /* warn if we are about to overwrite the pointer */ WARN_ON(rx_ring->rx_bi); - bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); if (!rx_ring->rx_bi) goto err; @@ -746,7 +746,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -769,11 +769,11 @@ err: } /** - * i40e_release_rx_desc - Store the new tail and head values + * iavf_release_rx_desc - Store the new tail and head values * @rx_ring: ring to bump * @val: new head index **/ -static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; @@ -790,26 +790,26 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40e_rx_offset - Return expected offset into page to access data + * iavf_rx_offset - Return expected offset into page to access data * @rx_ring: Ring we are requesting offset of * * Returns the offset value for ring into the data buffer. */ -static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) +static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) { - return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; + return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0; } /** - * i40e_alloc_mapped_page - recycle or make a new page + * iavf_alloc_mapped_page - recycle or make a new page * @rx_ring: ring to use * @bi: rx_buffer struct to modify * * Returns true if the page was successfully allocated or * reused. 
**/ -static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi) +static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *bi) { struct page *page = bi->page; dma_addr_t dma; @@ -821,7 +821,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, } /* alloc new page for storage */ - page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); + page = dev_alloc_pages(iavf_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_page_failed++; return false; @@ -829,22 +829,22 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, /* map page for use */ dma = dma_map_page_attrs(rx_ring->dev, page, 0, - i40e_rx_pg_size(rx_ring), + iavf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, - I40E_RX_DMA_ATTR); + IAVF_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, i40e_rx_pg_order(rx_ring)); + __free_pages(page, iavf_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_page_failed++; return false; } bi->dma = dma; bi->page = page; - bi->page_offset = i40e_rx_offset(rx_ring); + bi->page_offset = iavf_rx_offset(rx_ring); /* initialize pagecnt_bias to 1 representing we fully own page */ bi->pagecnt_bias = 1; @@ -853,15 +853,15 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, } /** - * i40e_receive_skb - Send a completed packet up the stack + * iavf_receive_skb - Send a completed packet up the stack * @rx_ring: rx ring in play * @skb: packet to send up * @vlan_tag: vlan tag for packet **/ -static void i40e_receive_skb(struct i40e_ring *rx_ring, +static void iavf_receive_skb(struct iavf_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { - struct i40e_q_vector *q_vector = rx_ring->q_vector; + struct iavf_q_vector *q_vector = rx_ring->q_vector; if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) @@ -871,27 +871,27 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring, } /** - * i40evf_alloc_rx_buffers - Replace used receive buffers + * iavf_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * * Returns false if all allocations were successful, true if any fail **/ -bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) { u16 ntu = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; + union iavf_rx_desc *rx_desc; + struct iavf_rx_buffer *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; - rx_desc = I40E_RX_DESC(rx_ring, ntu); + rx_desc = IAVF_RX_DESC(rx_ring, ntu); bi = &rx_ring->rx_bi[ntu]; do { - if (!i40e_alloc_mapped_page(rx_ring, bi)) + if (!iavf_alloc_mapped_page(rx_ring, bi)) goto no_buffers; /* sync the buffer for use by the device */ @@ -909,7 +909,7 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { - rx_desc = I40E_RX_DESC(rx_ring, 0); + rx_desc = IAVF_RX_DESC(rx_ring, 0); bi = rx_ring->rx_bi; ntu = 0; } @@ -921,13 +921,13 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } while (cleaned_count); if (rx_ring->next_to_use != ntu) - i40e_release_rx_desc(rx_ring, ntu); + iavf_release_rx_desc(rx_ring, ntu); return false; no_buffers: if (rx_ring->next_to_use != ntu) - 
i40e_release_rx_desc(rx_ring, ntu); + iavf_release_rx_desc(rx_ring, ntu); /* make sure to come back via polling to try again after * allocation failure @@ -936,27 +936,27 @@ no_buffers: } /** - * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum + * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about * @skb: skb currently being received and modified * @rx_desc: the receive descriptor **/ -static inline void i40e_rx_checksum(struct i40e_vsi *vsi, +static inline void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb, - union i40e_rx_desc *rx_desc) + union iavf_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded; + struct iavf_rx_ptype_decoded decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT; + rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >> + IAVF_RXD_QW1_ERROR_SHIFT; + rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >> + IAVF_RXD_QW1_STATUS_SHIFT; decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; @@ -968,45 +968,45 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, return; /* did the hardware decode the packet and checksum? */ - if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT))) return; /* both known and outer_ip must be set for the below code to work */ if (!(decoded.known && decoded.outer_ip)) return; - ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); + ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6); if (ipv4 && - (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | - BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | + BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT)))) goto checksum_fail; /* likely incorrect csum if alternate IP extension headers found */ if (ipv6 && - rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT)) /* don't increment checksum err here, non-fatal err */ return; /* there was some L4 error, count error and punt packet to the stack */ - if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) + if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT)) goto checksum_fail; /* handle packets that were not able to be checksummed due * to arrival speed, in this case the stack can compute * the csum. 
*/ - if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) + if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT)) return; /* Only report checksum unnecessary for TCP, UDP, or SCTP */ switch (decoded.inner_prot) { - case I40E_RX_PTYPE_INNER_PROT_TCP: - case I40E_RX_PTYPE_INNER_PROT_UDP: - case I40E_RX_PTYPE_INNER_PROT_SCTP: + case IAVF_RX_PTYPE_INNER_PROT_TCP: + case IAVF_RX_PTYPE_INNER_PROT_UDP: + case IAVF_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; /* fall though */ default: @@ -1020,56 +1020,56 @@ checksum_fail: } /** - * i40e_ptype_to_htype - get a hash type + * iavf_ptype_to_htype - get a hash type * @ptype: the ptype value from the descriptor * * Returns a hash type to be used by skb_set_hash **/ -static inline int i40e_ptype_to_htype(u8 ptype) +static inline int iavf_ptype_to_htype(u8 ptype) { - struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); + struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); if (!decoded.known) return PKT_HASH_TYPE_NONE; - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) + if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && + decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4) return PKT_HASH_TYPE_L4; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) + else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && + decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3) return PKT_HASH_TYPE_L3; else return PKT_HASH_TYPE_L2; } /** - * i40e_rx_hash - set the hash value in the skb + * iavf_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor * @skb: skb currently being received and modified * @rx_ptype: Rx packet type **/ -static inline void i40e_rx_hash(struct i40e_ring *ring, - union i40e_rx_desc *rx_desc, +static inline void iavf_rx_hash(struct iavf_ring *ring, + union iavf_rx_desc *rx_desc, struct sk_buff *skb, u8 rx_ptype) { u32 hash; const __le64 rss_mask = - cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH << + IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT); if (ring->netdev->features & NETIF_F_RXHASH) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype)); } } /** - * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor + * iavf_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated @@ -1080,13 +1080,13 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, * other fields within the skb. 
**/ static inline -void i40evf_process_skb_fields(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, struct sk_buff *skb, - u8 rx_ptype) +void iavf_process_skb_fields(struct iavf_ring *rx_ring, + union iavf_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) { - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); + iavf_rx_checksum(rx_ring->vsi, skb, rx_desc); skb_record_rx_queue(skb, rx_ring->queue_index); @@ -1095,7 +1095,7 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring, } /** - * i40e_cleanup_headers - Correct empty headers + * iavf_cleanup_headers - Correct empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being fixed * @@ -1107,7 +1107,7 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring, * * Returns true if an error was encountered and skb was freed. **/ -static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) +static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) { /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) @@ -1117,16 +1117,16 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) } /** - * i40e_reuse_rx_page - page flip buffer and store it back on the ring + * iavf_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter **/ -static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *old_buff) +static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *old_buff) { - struct i40e_rx_buffer *new_buff; + struct iavf_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; new_buff = &rx_ring->rx_bi[nta]; @@ -1143,20 +1143,20 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, } /** - * i40e_page_is_reusable - check if any reuse is possible + * iavf_page_is_reusable - check if any reuse is possible * @page: page struct to check * * A page is not reusable if it was allocated under low memory * conditions, or it's not in the same NUMA node as this CPU. */ -static inline bool i40e_page_is_reusable(struct page *page) +static inline bool iavf_page_is_reusable(struct page *page) { return (page_to_nid(page) == numa_mem_id()) && !page_is_pfmemalloc(page); } /** - * i40e_can_reuse_rx_page - Determine if this page can be reused by + * iavf_can_reuse_rx_page - Determine if this page can be reused by * the adapter for another receive * * @rx_buffer: buffer containing the page @@ -1182,13 +1182,13 @@ static inline bool i40e_page_is_reusable(struct page *page) * * In either case, if the page is reusable its refcount is increased. **/ -static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) +static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; /* Is any reuse possible? 
*/ - if (unlikely(!i40e_page_is_reusable(page))) + if (unlikely(!iavf_page_is_reusable(page))) return false; #if (PAGE_SIZE < 8192) @@ -1196,9 +1196,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) if (unlikely((page_count(page) - pagecnt_bias) > 1)) return false; #else -#define I40E_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) - if (rx_buffer->page_offset > I40E_LAST_OFFSET) +#define IAVF_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048) + if (rx_buffer->page_offset > IAVF_LAST_OFFSET) return false; #endif @@ -1215,7 +1215,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) } /** - * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff + * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @skb: sk_buff to place the data into @@ -1226,15 +1226,15 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) * * The function will then update the page offset. **/ -static void i40e_add_rx_frag(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer, +static void iavf_add_rx_frag(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *rx_buffer, struct sk_buff *skb, unsigned int size) { #if (PAGE_SIZE < 8192) - unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; + unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); + unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); #endif skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, @@ -1249,17 +1249,17 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring, } /** - * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use + * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use * @rx_ring: rx descriptor ring to transact packets on * @size: size of buffer to add to skb * * This function will pull an Rx buffer from the ring and synchronize it * for use by the CPU. */ -static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, +static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, const unsigned int size) { - struct i40e_rx_buffer *rx_buffer; + struct iavf_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); @@ -1278,7 +1278,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, } /** - * i40e_construct_skb - Allocate skb and populate it + * iavf_construct_skb - Allocate skb and populate it * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: rx buffer to pull data from * @size: size of buffer to add to skb @@ -1287,13 +1287,13 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, * data from the current receive descriptor, taking care to set up the * skb correctly. 
*/ -static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer, +static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *rx_buffer, unsigned int size) { void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) - unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; + unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(size); #endif @@ -1308,15 +1308,15 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - I40E_RX_HDR_SIZE, + IAVF_RX_HDR_SIZE, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; /* Determine available headroom for copy */ headlen = size; - if (headlen > I40E_RX_HDR_SIZE) - headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE); + if (headlen > IAVF_RX_HDR_SIZE) + headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); @@ -1343,7 +1343,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, } /** - * i40e_build_skb - Build skb around an existing buffer + * iavf_build_skb - Build skb around an existing buffer * @rx_ring: Rx descriptor ring to transact packets on * @rx_buffer: Rx buffer to pull data from * @size: size of buffer to add to skb @@ -1351,16 +1351,16 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, * This function builds an skb around an existing Rx buffer, taking care * to set up the skb correctly and avoid any memcpy overhead. */ -static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer, +static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *rx_buffer, unsigned int size) { void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) - unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; + unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(I40E_SKB_PAD + size); + SKB_DATA_ALIGN(IAVF_SKB_PAD + size); #endif struct sk_buff *skb; @@ -1370,12 +1370,12 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, prefetch(va + L1_CACHE_BYTES); #endif /* build an skb around the page buffer */ - skb = build_skb(va - I40E_SKB_PAD, truesize); + skb = build_skb(va - IAVF_SKB_PAD, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, I40E_SKB_PAD); + skb_reserve(skb, IAVF_SKB_PAD); __skb_put(skb, size); /* buffer is used by skb, update page_offset */ @@ -1389,25 +1389,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, } /** - * i40e_put_rx_buffer - Clean up used buffer and either recycle or free + * iavf_put_rx_buffer - Clean up used buffer and either recycle or free * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: rx buffer to pull data from * * This function will clean up the contents of the rx_buffer. It will * either recycle the buffer or unmap it and free the associated resources. 
*/ -static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer) +static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, + struct iavf_rx_buffer *rx_buffer) { - if (i40e_can_reuse_rx_page(rx_buffer)) { + if (iavf_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ - i40e_reuse_rx_page(rx_ring, rx_buffer); + iavf_reuse_rx_page(rx_ring, rx_buffer); rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - i40e_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); + iavf_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); } @@ -1417,7 +1417,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, } /** - * i40e_is_non_eop - process handling of non-EOP buffers + * iavf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * @skb: Current socket buffer containing buffer in progress @@ -1427,8 +1427,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. **/ -static bool i40e_is_non_eop(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, +static bool iavf_is_non_eop(struct iavf_ring *rx_ring, + union iavf_rx_desc *rx_desc, struct sk_buff *skb) { u32 ntc = rx_ring->next_to_clean + 1; @@ -1437,11 +1437,11 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, ntc = (ntc < rx_ring->count) ? ntc : 0; rx_ring->next_to_clean = ntc; - prefetch(I40E_RX_DESC(rx_ring, ntc)); + prefetch(IAVF_RX_DESC(rx_ring, ntc)); /* if we are the last buffer then there is nothing else to do */ -#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) - if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) +#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT) + if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF))) return false; rx_ring->rx_stats.non_eop_descs++; @@ -1450,7 +1450,7 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, } /** - * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process * @@ -1461,29 +1461,29 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, * * Returns amount of work completed **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct sk_buff *skb = rx_ring->skb; - u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring); bool failure = false; while (likely(total_rx_packets < (unsigned int)budget)) { - struct i40e_rx_buffer *rx_buffer; - union i40e_rx_desc *rx_desc; + struct iavf_rx_buffer *rx_buffer; + union iavf_rx_desc *rx_desc; unsigned int size; u16 vlan_tag; u8 rx_ptype; u64 qword; /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + if (cleaned_count >= IAVF_RX_BUFFER_WRITE) { failure = failure || - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + iavf_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } - rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + rx_desc = 
IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean); /* status_error_len will always be zero for unused descriptors * because it's cleared in cleanup, and overlaps with hdr_addr @@ -1498,21 +1498,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >> + IAVF_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) break; - i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); - rx_buffer = i40e_get_rx_buffer(rx_ring, size); + iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); + rx_buffer = iavf_get_rx_buffer(rx_ring, size); /* retrieve a buffer from the ring */ if (skb) - i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); + iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); else if (ring_uses_build_skb(rx_ring)) - skb = i40e_build_skb(rx_ring, rx_buffer, size); + skb = iavf_build_skb(rx_ring, rx_buffer, size); else - skb = i40e_construct_skb(rx_ring, rx_buffer, size); + skb = iavf_construct_skb(rx_ring, rx_buffer, size); /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -1521,24 +1521,24 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) break; } - i40e_put_rx_buffer(rx_ring, rx_buffer); + iavf_put_rx_buffer(rx_ring, rx_buffer); cleaned_count++; - if (i40e_is_non_eop(rx_ring, rx_desc, skb)) + if (iavf_is_non_eop(rx_ring, rx_desc, skb)) continue; /* ERR_MASK will only have valid bits if EOP set, and * what we are doing here is actually checking - * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in + * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in * the error field */ - if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { + if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); skb = NULL; continue; } - if (i40e_cleanup_headers(rx_ring, skb)) { + if (iavf_cleanup_headers(rx_ring, skb)) { skb = NULL; continue; } @@ -1547,18 +1547,18 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) total_rx_bytes += skb->len; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; + rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> + IAVF_RXD_QW1_PTYPE_SHIFT; /* populate checksum, VLAN, and protocol */ - i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; - i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); - i40e_receive_skb(rx_ring, skb, vlan_tag); + iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); + iavf_receive_skb(rx_ring, skb, vlan_tag); skb = NULL; /* update budget accounting */ @@ -1578,7 +1578,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) return failure ? budget : (int)total_rx_packets; } -static inline u32 i40e_buildreg_itr(const int type, u16 itr) +static inline u32 iavf_buildreg_itr(const int type, u16 itr) { u32 val; @@ -1597,17 +1597,17 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) * only need to shift by the interval shift - 1 instead of the * full value. 
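 *
 * (Illustration only, not from the original source: the ITR values used by
 * this driver are kept even and the register interval field counts 2 usec
 * units, so "itr << (INTERVAL_SHIFT - 1)" is the same as
 * "(itr / 2) << INTERVAL_SHIFT".  For example, itr = 50 usec ends up in the
 * register as an interval field of 25.)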
*/ - itr &= I40E_ITR_MASK; + itr &= IAVF_ITR_MASK; - val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | - (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1)); + val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | + (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1)); return val; } /* a small macro to shorten up some long lines */ -#define INTREG I40E_VFINT_DYN_CTLN1 +#define INTREG IAVF_VFINT_DYN_CTLN1 /* The act of updating the ITR will cause it to immediately trigger. In order * to prevent this from throwing off adaptive update statistics we defer the @@ -1619,20 +1619,20 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) #define ITR_COUNTDOWN_START 3 /** - * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt + * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about * @q_vector: q_vector for which itr is being updated and interrupt enabled * **/ -static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, - struct i40e_q_vector *q_vector) +static inline void iavf_update_enable_itr(struct iavf_vsi *vsi, + struct iavf_q_vector *q_vector) { - struct i40e_hw *hw = &vsi->back->hw; + struct iavf_hw *hw = &vsi->back->hw; u32 intval; /* These will do nothing if dynamic updates are not enabled */ - i40e_update_itr(q_vector, &q_vector->tx); - i40e_update_itr(q_vector, &q_vector->rx); + iavf_update_itr(q_vector, &q_vector->tx); + iavf_update_itr(q_vector, &q_vector->rx); /* This block of logic allows us to get away with only updating * one ITR value with each interrupt. The idea is to perform a @@ -1644,7 +1644,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ if (q_vector->rx.target_itr < q_vector->rx.current_itr) { /* Rx ITR needs to be reduced, this is highest priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, + intval = iavf_buildreg_itr(IAVF_RX_ITR, q_vector->rx.target_itr); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; @@ -1654,29 +1654,29 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, /* Tx ITR needs to be reduced, this is second priority * Tx ITR needs to be increased more than Rx, fourth priority */ - intval = i40e_buildreg_itr(I40E_TX_ITR, + intval = iavf_buildreg_itr(IAVF_TX_ITR, q_vector->tx.target_itr); q_vector->tx.current_itr = q_vector->tx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { /* Rx ITR needs to be increased, third priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, + intval = iavf_buildreg_itr(IAVF_RX_ITR, q_vector->rx.target_itr); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else { /* No ITR update, lowest priority */ - intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0); if (q_vector->itr_countdown) q_vector->itr_countdown--; } - if (!test_bit(__I40E_VSI_DOWN, vsi->state)) + if (!test_bit(__IAVF_VSI_DOWN, vsi->state)) wr32(hw, INTREG(q_vector->reg_idx), intval); } /** - * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine + * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * @@ -1684,18 +1684,18 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, * * Returns the amount of work done **/ -int i40evf_napi_poll(struct 
napi_struct *napi, int budget) +int iavf_napi_poll(struct napi_struct *napi, int budget) { - struct i40e_q_vector *q_vector = - container_of(napi, struct i40e_q_vector, napi); - struct i40e_vsi *vsi = q_vector->vsi; - struct i40e_ring *ring; + struct iavf_q_vector *q_vector = + container_of(napi, struct iavf_q_vector, napi); + struct iavf_vsi *vsi = q_vector->vsi; + struct iavf_ring *ring; bool clean_complete = true; bool arm_wb = false; int budget_per_ring; int work_done = 0; - if (test_bit(__I40E_VSI_DOWN, vsi->state)) { + if (test_bit(__IAVF_VSI_DOWN, vsi->state)) { napi_complete(napi); return 0; } @@ -1703,8 +1703,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - i40e_for_each_ring(ring, q_vector->tx) { - if (!i40e_clean_tx_irq(vsi, ring, budget)) { + iavf_for_each_ring(ring, q_vector->tx) { + if (!iavf_clean_tx_irq(vsi, ring, budget)) { clean_complete = false; continue; } @@ -1721,8 +1721,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) { - int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); + iavf_for_each_ring(ring, q_vector->rx) { + int cleaned = iavf_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ @@ -1746,7 +1746,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) napi_complete_done(napi, work_done); /* Force an interrupt */ - i40evf_force_wb(vsi, q_vector); + iavf_force_wb(vsi, q_vector); /* Return budget-1 so that polling stops */ return budget - 1; @@ -1754,24 +1754,24 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40e_enable_wb_on_itr(vsi, q_vector); + iavf_enable_wb_on_itr(vsi, q_vector); } return budget; } - if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) + if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete_done(napi, work_done); - i40e_update_enable_itr(vsi, q_vector); + iavf_update_enable_itr(vsi, q_vector); return min(work_done, budget - 1); } /** - * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW * @skb: send buffer * @tx_ring: ring to send buffer on * @flags: the tx flags to be set @@ -1782,9 +1782,9 @@ tx_only: * Returns error code indicate the frame should be dropped upon error and the * otherwise returns 0 to indicate the flags has been set properly. 
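 *
 * Worked example (editorial illustration using the IAVF_TX_FLAGS_* values
 * from iavf_txrx.h): for a hardware-offloaded tag whose VLAN TCI is 0x0064,
 * *flags becomes (0x0064 << IAVF_TX_FLAGS_VLAN_SHIFT) | IAVF_TX_FLAGS_HW_VLAN,
 * i.e. 0x00640002: the TCI sits in the upper 16 bits and BIT(1) marks the
 * tag for hardware insertion.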
**/ -static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, - u32 *flags) +static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, + struct iavf_ring *tx_ring, + u32 *flags) { __be16 protocol = skb->protocol; u32 tx_flags = 0; @@ -1804,8 +1804,8 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, /* if we have a HW VLAN tag being added, default to the HW one */ if (skb_vlan_tag_present(skb)) { - tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; - tx_flags |= I40E_TX_FLAGS_HW_VLAN; + tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IAVF_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN, check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; @@ -1815,8 +1815,8 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, return -EINVAL; protocol = vhdr->h_vlan_encapsulated_proto; - tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; - tx_flags |= I40E_TX_FLAGS_SW_VLAN; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IAVF_TX_FLAGS_SW_VLAN; } out: @@ -1825,14 +1825,14 @@ out: } /** - * i40e_tso - set up the tso context descriptor + * iavf_tso - set up the tso context descriptor * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, +static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { struct sk_buff *skb = first->skb; @@ -1923,17 +1923,17 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, first->bytecount += (first->gso_segs - 1) * *hdr_len; /* find the field values */ - cd_cmd = I40E_TX_CTX_DESC_TSO; + cd_cmd = IAVF_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = gso_size; - *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) | + (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) | + (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT); return 1; } /** - * i40e_tx_enable_csum - Enable Tx checksum offloads + * iavf_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set @@ -1941,9 +1941,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ -static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, +static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, - struct i40e_ring *tx_ring, + struct iavf_ring *tx_ring, u32 *cd_tunneling) { union { @@ -1968,19 +1968,19 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4.hdr = skb_transport_header(skb); /* compute outer L2 header size */ - offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { u32 tunnel = 0; /* define outer network header type */ - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
- I40E_TX_CTX_EXT_IP_IPV4 : - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + if (*tx_flags & IAVF_TX_FLAGS_IPV4) { + tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? + IAVF_TX_CTX_EXT_IP_IPV4 : + IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM; l4_proto = ip.v4->protocol; - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - tunnel |= I40E_TX_CTX_EXT_IP_IPV6; + } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { + tunnel |= IAVF_TX_CTX_EXT_IP_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; @@ -1992,20 +1992,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: - tunnel |= I40E_TXD_CTX_UDP_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + tunnel |= IAVF_TXD_CTX_UDP_TUNNELING; + *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; break; case IPPROTO_GRE: - tunnel |= I40E_TXD_CTX_GRE_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + tunnel |= IAVF_TXD_CTX_GRE_TUNNELING; + *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; break; case IPPROTO_IPIP: case IPPROTO_IPV6: - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; l4.hdr = skb_inner_network_header(skb); break; default: - if (*tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & IAVF_TX_FLAGS_TSO) return -1; skb_checksum_help(skb); @@ -2014,20 +2014,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* compute outer L3 header size */ tunnel |= ((l4.hdr - ip.hdr) / 4) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT; /* switch IP header pointer from outer to inner header */ ip.hdr = skb_inner_network_header(skb); /* compute tunnel header size */ tunnel |= ((ip.hdr - l4.hdr) / 2) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; + IAVF_TXD_CTX_QW0_NATLEN_SHIFT; /* indicate if we need to offload outer UDP header */ - if ((*tx_flags & I40E_TX_FLAGS_TSO) && + if ((*tx_flags & IAVF_TX_FLAGS_TSO) && !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) - tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK; /* record tunnel offload values */ *cd_tunneling |= tunnel; @@ -2037,24 +2037,24 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4_proto = 0; /* reset type as we transition from outer to inner headers */ - *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); + *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6); if (ip.v4->version == 4) - *tx_flags |= I40E_TX_FLAGS_IPV4; + *tx_flags |= IAVF_TX_FLAGS_IPV4; if (ip.v6->version == 6) - *tx_flags |= I40E_TX_FLAGS_IPV6; + *tx_flags |= IAVF_TX_FLAGS_IPV6; } /* Enable IP checksum offloads */ - if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & IAVF_TX_FLAGS_IPV4) { l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ - cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? - I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : - I40E_TX_DESC_CMD_IIPT_IPV4; - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? 
+ IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM : + IAVF_TX_DESC_CMD_IIPT_IPV4; + } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { + cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; @@ -2064,29 +2064,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } /* compute inner L3 header size */ - offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ - cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ - cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP; offset |= (sizeof(struct sctphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ - cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; offset |= (sizeof(struct udphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: - if (*tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & IAVF_TX_FLAGS_TSO) return -1; skb_checksum_help(skb); return 0; @@ -2099,25 +2099,25 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } /** - * i40e_create_tx_ctx Build the Tx context descriptor + * iavf_create_tx_ctx Build the Tx context descriptor * @tx_ring: ring to create the descriptor on * @cd_type_cmd_tso_mss: Quad Word 1 * @cd_tunneling: Quad Word 0 - bits 0-31 * @cd_l2tag2: Quad Word 0 - bits 32-63 **/ -static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, +static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, const u64 cd_type_cmd_tso_mss, const u32 cd_tunneling, const u32 cd_l2tag2) { - struct i40e_tx_context_desc *context_desc; + struct iavf_tx_context_desc *context_desc; int i = tx_ring->next_to_use; - if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) && !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ - context_desc = I40E_TX_CTXTDESC(tx_ring, i); + context_desc = IAVF_TX_CTXTDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; @@ -2130,7 +2130,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, } /** - * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet + * __iavf_chk_linearize - Check if there are more than 8 buffers per packet * @skb: send buffer * * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire @@ -2142,20 +2142,20 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, * the segment payload in the first descriptor, and another 7 for the * fragments. **/ -bool __i40evf_chk_linearize(struct sk_buff *skb) +bool __iavf_chk_linearize(struct sk_buff *skb) { const struct skb_frag_struct *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ nr_frags = skb_shinfo(skb)->nr_frags; - if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) + if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1)) return false; /* We need to walk through the list and validate that each group * of 6 fragments totals at least gso_size. 
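	 *
	 * Rough illustration, not part of the original source: for a TSO skb
	 * with a gso_size of 9000 built from a dozen 1 KB fragments, any six
	 * consecutive fragments cover only about 6 KB, so the running sum
	 * below goes negative and we return true, asking the caller to
	 * linearize the skb instead of risking a segment that needs more than
	 * IAVF_MAX_BUFFER_TXD buffers.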
*/ - nr_frags -= I40E_MAX_BUFFER_TXD - 2; + nr_frags -= IAVF_MAX_BUFFER_TXD - 2; frag = &skb_shinfo(skb)->frags[0]; /* Initialize size to the negative value of gso_size minus 1. We @@ -2187,17 +2187,17 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) * figure out what the remainder would be in the last * descriptor associated with the fragment. */ - if (stale_size > I40E_MAX_DATA_PER_TXD) { + if (stale_size > IAVF_MAX_DATA_PER_TXD) { int align_pad = -(stale->page_offset) & - (I40E_MAX_READ_REQ_SIZE - 1); + (IAVF_MAX_READ_REQ_SIZE - 1); sum -= align_pad; stale_size -= align_pad; do { - sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; - stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; - } while (stale_size > I40E_MAX_DATA_PER_TXD); + sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > IAVF_MAX_DATA_PER_TXD); } /* if sum is negative we failed to make sufficient progress */ @@ -2214,20 +2214,20 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) } /** - * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions + * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions * @tx_ring: the ring to be checked * @size: the size buffer we want to assure is available * * Returns -EBUSY if a stop is needed, else 0 **/ -int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Memory barrier before checking head and tail */ smp_mb(); /* Check again in a case another CPU has just made room available. */ - if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + if (likely(IAVF_DESC_UNUSED(tx_ring) < size)) return -EBUSY; /* A reprieve! - use start_queue because it doesn't call schedule */ @@ -2237,7 +2237,7 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** - * i40evf_tx_map - Build the Tx descriptor + * iavf_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer * @first: first buffer info buffer to use @@ -2246,34 +2246,34 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc **/ -static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) +static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, + struct iavf_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); struct skb_frag_struct *frag; - struct i40e_tx_buffer *tx_bi; - struct i40e_tx_desc *tx_desc; + struct iavf_tx_buffer *tx_bi; + struct iavf_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; - if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { - td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; - td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> - I40E_TX_FLAGS_VLAN_SHIFT; + if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) { + td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; + td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> + IAVF_TX_FLAGS_VLAN_SHIFT; } first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - tx_desc = I40E_TX_DESC(tx_ring, i); + tx_desc = IAVF_TX_DESC(tx_ring, i); tx_bi = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { - unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + unsigned int max_data = 
IAVF_MAX_DATA_PER_TXD_ALIGNED; if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; @@ -2283,10 +2283,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, dma_unmap_addr_set(tx_bi, dma, dma); /* align size to end of page */ - max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); + max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1); tx_desc->buffer_addr = cpu_to_le64(dma); - while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { + while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, max_data, td_tag); @@ -2295,14 +2295,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i++; if (i == tx_ring->count) { - tx_desc = I40E_TX_DESC(tx_ring, 0); + tx_desc = IAVF_TX_DESC(tx_ring, 0); i = 0; } dma += max_data; size -= max_data; - max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED; tx_desc->buffer_addr = cpu_to_le64(dma); } @@ -2316,7 +2316,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i++; if (i == tx_ring->count) { - tx_desc = I40E_TX_DESC(tx_ring, 0); + tx_desc = IAVF_TX_DESC(tx_ring, 0); i = 0; } @@ -2337,10 +2337,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; - i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + iavf_maybe_stop_tx(tx_ring, DESC_NEEDED); /* write last descriptor with RS and EOP bits */ - td_cmd |= I40E_TXD_CMD; + td_cmd |= IAVF_TXD_CMD; tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); @@ -2373,7 +2373,7 @@ dma_error: /* clear dma mappings for failed tx_bi map */ for (;;) { tx_bi = &tx_ring->tx_bi[i]; - i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); + iavf_unmap_and_free_tx_resource(tx_ring, tx_bi); if (tx_bi == first) break; if (i == 0) @@ -2385,18 +2385,18 @@ dma_error: } /** - * i40e_xmit_frame_ring - Sends buffer on Tx ring + * iavf_xmit_frame_ring - Sends buffer on Tx ring * @skb: send buffer * @tx_ring: ring to send buffer on * * Returns NETDEV_TX_OK if sent, else an error code **/ -static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, - struct i40e_ring *tx_ring) +static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, + struct iavf_ring *tx_ring) { - u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; + u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT; u32 cd_tunneling = 0, cd_l2tag2 = 0; - struct i40e_tx_buffer *first; + struct iavf_tx_buffer *first; u32 td_offset = 0; u32 tx_flags = 0; __be16 protocol; @@ -2407,25 +2407,25 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* prefetch the data, we'll need it later */ prefetch(skb->data); - i40e_trace(xmit_frame_ring, skb, tx_ring); + iavf_trace(xmit_frame_ring, skb, tx_ring); - count = i40e_xmit_descriptor_count(skb); - if (i40e_chk_linearize(skb, count)) { + count = iavf_xmit_descriptor_count(skb); + if (iavf_chk_linearize(skb, count)) { if (__skb_linearize(skb)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - count = i40e_txd_use_count(skb->len); + count = iavf_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } - /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD, * + 4 desc gap to avoid the cache line where head is, * + 1 desc for context descriptor, * otherwise try next time */ - if (i40e_maybe_stop_tx(tx_ring, count 
+ 4 + 1)) { + if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } @@ -2437,7 +2437,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, first->gso_segs = 1; /* prepare the xmit flags */ - if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; /* obtain protocol of skb */ @@ -2445,19 +2445,19 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* setup IPv4/IPv6 offloads */ if (protocol == htons(ETH_P_IP)) - tx_flags |= I40E_TX_FLAGS_IPV4; + tx_flags |= IAVF_TX_FLAGS_IPV4; else if (protocol == htons(ETH_P_IPV6)) - tx_flags |= I40E_TX_FLAGS_IPV6; + tx_flags |= IAVF_TX_FLAGS_IPV6; - tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); + tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; else if (tso) - tx_flags |= I40E_TX_FLAGS_TSO; + tx_flags |= IAVF_TX_FLAGS_TSO; /* Always offload the checksum, since it's in the data descriptor */ - tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); if (tso < 0) goto out_drop; @@ -2465,44 +2465,44 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, skb_tx_timestamp(skb); /* always enable CRC insertion offload */ - td_cmd |= I40E_TX_DESC_CMD_ICRC; + td_cmd |= IAVF_TX_DESC_CMD_ICRC; - i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, + iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); - i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, - td_cmd, td_offset); + iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); return NETDEV_TX_OK; out_drop: - i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); + iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring); dev_kfree_skb_any(first->skb); first->skb = NULL; return NETDEV_TX_OK; } /** - * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer + * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer * @skb: send buffer * @netdev: network interface device structure * * Returns NETDEV_TX_OK if sent, else an error code **/ -netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; + struct iavf_adapter *adapter = netdev_priv(netdev); + struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; /* hardware can't handle really short frames, hardware padding works * beyond this point */ - if (unlikely(skb->len < I40E_MIN_TX_LEN)) { - if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) + if (unlikely(skb->len < IAVF_MIN_TX_LEN)) { + if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len)) return NETDEV_TX_OK; - skb->len = I40E_MIN_TX_LEN; - skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); + skb->len = IAVF_MIN_TX_LEN; + skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN); } - return i40e_xmit_frame_ring(skb, tx_ring); + return iavf_xmit_frame_ring(skb, tx_ring); } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index 3b5a63b3236e..71e7d090f8db 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ -#ifndef _I40E_TXRX_H_ -#define _I40E_TXRX_H_ +#ifndef _IAVF_TXRX_H_ +#define _IAVF_TXRX_H_ /* Interrupt Throttling and Rate Limiting Goodies */ -#define I40E_DEFAULT_IRQ_WORK 256 +#define IAVF_DEFAULT_IRQ_WORK 256 /* The datasheet for the X710 and XL710 indicate that the maximum value for * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec @@ -13,80 +13,80 @@ * the register value which is divided by 2 lets use the actual values and * avoid an excessive amount of translation. */ -#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ -#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */ -#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */ -#define I40E_ITR_100K 10 /* all values below must be even */ -#define I40E_ITR_50K 20 -#define I40E_ITR_20K 50 -#define I40E_ITR_18K 60 -#define I40E_ITR_8K 122 -#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */ -#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC) -#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK) -#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC)) - -#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) -#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC) +#define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ +#define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */ +#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */ +#define IAVF_ITR_100K 10 /* all values below must be even */ +#define IAVF_ITR_50K 20 +#define IAVF_ITR_20K 50 +#define IAVF_ITR_18K 60 +#define IAVF_ITR_8K 122 +#define IAVF_MAX_ITR 8160 /* maximum value as per datasheet */ +#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC) +#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK) +#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC)) + +#define IAVF_ITR_RX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC) +#define IAVF_ITR_TX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC) /* 0x40 is the enable bit for interrupt rate limiting, and must be set if * the value of the rate limit is non-zero */ #define INTRL_ENA BIT(6) -#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ +#define IAVF_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) #define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) -#define I40E_INTRL_8K 125 /* 8000 ints/sec */ -#define I40E_INTRL_62K 16 /* 62500 ints/sec */ -#define I40E_INTRL_83K 12 /* 83333 ints/sec */ +#define IAVF_INTRL_8K 125 /* 8000 ints/sec */ +#define IAVF_INTRL_62K 16 /* 62500 ints/sec */ +#define IAVF_INTRL_83K 12 /* 83333 ints/sec */ -#define I40E_QUEUE_END_OF_LIST 0x7FF +#define IAVF_QUEUE_END_OF_LIST 0x7FF /* this enum matches hardware bits and is meant to be used by DYN_CTLN * registers and QINT registers or more generally anywhere in the manual * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any * register but instead is a special value meaning "don't update" ITR0/1/2. 
*/ -enum i40e_dyn_idx_t { - I40E_IDX_ITR0 = 0, - I40E_IDX_ITR1 = 1, - I40E_IDX_ITR2 = 2, - I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ +enum iavf_dyn_idx_t { + IAVF_IDX_ITR0 = 0, + IAVF_IDX_ITR1 = 1, + IAVF_IDX_ITR2 = 2, + IAVF_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ }; /* these are indexes into ITRN registers */ -#define I40E_RX_ITR I40E_IDX_ITR0 -#define I40E_TX_ITR I40E_IDX_ITR1 -#define I40E_PE_ITR I40E_IDX_ITR2 +#define IAVF_RX_ITR IAVF_IDX_ITR0 +#define IAVF_TX_ITR IAVF_IDX_ITR1 +#define IAVF_PE_ITR IAVF_IDX_ITR2 /* Supported RSS offloads */ -#define I40E_DEFAULT_RSS_HENA ( \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ - BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) - -#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) +#define IAVF_DEFAULT_RSS_HENA ( \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD)) + +#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) /* Supported Rx Buffer Sizes (a multiple of 128) */ -#define I40E_RXBUFFER_256 256 -#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ -#define I40E_RXBUFFER_2048 2048 -#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ -#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ +#define IAVF_RXBUFFER_256 256 +#define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ +#define IAVF_RXBUFFER_2048 2048 +#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ +#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */ /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, @@ -95,11 +95,11 @@ enum i40e_dyn_idx_t { * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab) * i.e. 
RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ -#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 -#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) -#define i40e_rx_desc i40e_32byte_rx_desc +#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256 +#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) +#define iavf_rx_desc iavf_32byte_rx_desc -#define I40E_RX_DMA_ATTR \ +#define IAVF_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) /* Attempt to maximize the headroom available for incoming frames. We @@ -113,10 +113,10 @@ enum i40e_dyn_idx_t { * receive path. */ #if (PAGE_SIZE < 8192) -#define I40E_2K_TOO_SMALL_WITH_PADDING \ -((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048)) +#define IAVF_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048)) -static inline int i40e_compute_pad(int rx_buf_len) +static inline int iavf_compute_pad(int rx_buf_len) { int page_size, pad_size; @@ -126,7 +126,7 @@ static inline int i40e_compute_pad(int rx_buf_len) return pad_size; } -static inline int i40e_skb_pad(void) +static inline int iavf_skb_pad(void) { int rx_buf_len; @@ -137,25 +137,25 @@ static inline int i40e_skb_pad(void) * tailroom due to NET_IP_ALIGN possibly shifting us out of * cache-line alignment. */ - if (I40E_2K_TOO_SMALL_WITH_PADDING) - rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + if (IAVF_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); else - rx_buf_len = I40E_RXBUFFER_1536; + rx_buf_len = IAVF_RXBUFFER_1536; /* if needed make room for NET_IP_ALIGN */ rx_buf_len -= NET_IP_ALIGN; - return i40e_compute_pad(rx_buf_len); + return iavf_compute_pad(rx_buf_len); } -#define I40E_SKB_PAD i40e_skb_pad() +#define IAVF_SKB_PAD iavf_skb_pad() #else -#define I40E_2K_TOO_SMALL_WITH_PADDING false -#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#define IAVF_2K_TOO_SMALL_WITH_PADDING false +#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) #endif /** - * i40e_test_staterr - tests bits in Rx descriptor status and error fields + * iavf_test_staterr - tests bits in Rx descriptor status and error fields * @rx_desc: pointer to receive descriptor (in le64 format) * @stat_err_bits: value to mask * @@ -164,7 +164,7 @@ static inline int i40e_skb_pad(void) * The status_error_len doesn't need to be shifted because it begins * at offset zero. */ -static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, +static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc, const u64 stat_err_bits) { return !!(rx_desc->wb.qword1.status_error_len & @@ -172,8 +172,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, } /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ -#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ -#define I40E_RX_INCREMENT(r, i) \ +#define IAVF_RX_INCREMENT(r, i) \ do { \ (i)++; \ if ((i) == (r)->count) \ @@ -181,34 +180,34 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, r->next_to_clean = i; \ } while (0) -#define I40E_RX_NEXT_DESC(r, i, n) \ +#define IAVF_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ if ((i) == (r)->count) \ i = 0; \ - (n) = I40E_RX_DESC((r), (i)); \ + (n) = IAVF_RX_DESC((r), (i)); \ } while (0) -#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ +#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n) \ do { \ - I40E_RX_NEXT_DESC((r), (i), (n)); \ + IAVF_RX_NEXT_DESC((r), (i), (n)); \ prefetch((n)); \ } while (0) -#define I40E_MAX_BUFFER_TXD 8 -#define I40E_MIN_TX_LEN 17 +#define IAVF_MAX_BUFFER_TXD 8 +#define IAVF_MIN_TX_LEN 17 /* The size limit for a transmit buffer in a descriptor is (16K - 1). * In order to align with the read requests we will align the value to * the nearest 4K which represents our maximum read request size. */ -#define I40E_MAX_READ_REQ_SIZE 4096 -#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) -#define I40E_MAX_DATA_PER_TXD_ALIGNED \ - (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) +#define IAVF_MAX_READ_REQ_SIZE 4096 +#define IAVF_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define IAVF_MAX_DATA_PER_TXD_ALIGNED \ + (IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1)) /** - * i40e_txd_use_count - estimate the number of descriptors needed for Tx + * iavf_txd_use_count - estimate the number of descriptors needed for Tx * @size: transmit request size in bytes * * Due to hardware alignment restrictions (4K alignment), we need to @@ -235,31 +234,31 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, * operations into: * return ((size * 85) >> 20) + 1; */ -static inline unsigned int i40e_txd_use_count(unsigned int size) +static inline unsigned int iavf_txd_use_count(unsigned int size) { return ((size * 85) >> 20) + 1; } /* Tx Descriptors needed, worst case */ #define DESC_NEEDED (MAX_SKB_FRAGS + 6) -#define I40E_MIN_DESC_PENDING 4 - -#define I40E_TX_FLAGS_HW_VLAN BIT(1) -#define I40E_TX_FLAGS_SW_VLAN BIT(2) -#define I40E_TX_FLAGS_TSO BIT(3) -#define I40E_TX_FLAGS_IPV4 BIT(4) -#define I40E_TX_FLAGS_IPV6 BIT(5) -#define I40E_TX_FLAGS_FCCRC BIT(6) -#define I40E_TX_FLAGS_FSO BIT(7) -#define I40E_TX_FLAGS_FD_SB BIT(9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) -#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 -#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 -#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 -#define I40E_TX_FLAGS_VLAN_SHIFT 16 - -struct i40e_tx_buffer { - struct i40e_tx_desc *next_to_watch; +#define IAVF_MIN_DESC_PENDING 4 + +#define IAVF_TX_FLAGS_HW_VLAN BIT(1) +#define IAVF_TX_FLAGS_SW_VLAN BIT(2) +#define IAVF_TX_FLAGS_TSO BIT(3) +#define IAVF_TX_FLAGS_IPV4 BIT(4) +#define IAVF_TX_FLAGS_IPV6 BIT(5) +#define IAVF_TX_FLAGS_FCCRC BIT(6) +#define IAVF_TX_FLAGS_FSO BIT(7) +#define IAVF_TX_FLAGS_FD_SB BIT(9) +#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define IAVF_TX_FLAGS_VLAN_SHIFT 16 + +struct iavf_tx_buffer { + struct iavf_tx_desc *next_to_watch; union { struct sk_buff *skb; void *raw_buf; @@ -272,7 +271,7 @@ struct i40e_tx_buffer { u32 tx_flags; }; -struct i40e_rx_buffer { +struct iavf_rx_buffer { dma_addr_t dma; struct page *page; #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) @@ -283,12 +282,12 @@ struct i40e_rx_buffer { __u16 
pagecnt_bias; }; -struct i40e_queue_stats { +struct iavf_queue_stats { u64 packets; u64 bytes; }; -struct i40e_tx_queue_stats { +struct iavf_tx_queue_stats { u64 restart_queue; u64 tx_busy; u64 tx_done_old; @@ -298,7 +297,7 @@ struct i40e_tx_queue_stats { u64 tx_lost_interrupt; }; -struct i40e_rx_queue_stats { +struct iavf_rx_queue_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buff_failed; @@ -306,34 +305,34 @@ struct i40e_rx_queue_stats { u64 realloc_count; }; -enum i40e_ring_state_t { - __I40E_TX_FDIR_INIT_DONE, - __I40E_TX_XPS_INIT_DONE, - __I40E_RING_STATE_NBITS /* must be last */ +enum iavf_ring_state_t { + __IAVF_TX_FDIR_INIT_DONE, + __IAVF_TX_XPS_INIT_DONE, + __IAVF_RING_STATE_NBITS /* must be last */ }; /* some useful defines for virtchannel interface, which * is the only remaining user of header split */ -#define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 -#define I40E_RX_SPLIT_L2 0x1 -#define I40E_RX_SPLIT_IP 0x2 -#define I40E_RX_SPLIT_TCP_UDP 0x4 -#define I40E_RX_SPLIT_SCTP 0x8 +#define IAVF_RX_DTYPE_NO_SPLIT 0 +#define IAVF_RX_DTYPE_HEADER_SPLIT 1 +#define IAVF_RX_DTYPE_SPLIT_ALWAYS 2 +#define IAVF_RX_SPLIT_L2 0x1 +#define IAVF_RX_SPLIT_IP 0x2 +#define IAVF_RX_SPLIT_TCP_UDP 0x4 +#define IAVF_RX_SPLIT_SCTP 0x8 /* struct that defines a descriptor ring, associated with a VSI */ -struct i40e_ring { - struct i40e_ring *next; /* pointer to next ring in q_vector */ +struct iavf_ring { + struct iavf_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ struct net_device *netdev; /* netdev ring maps to */ union { - struct i40e_tx_buffer *tx_bi; - struct i40e_rx_buffer *rx_bi; + struct iavf_tx_buffer *tx_bi; + struct iavf_rx_buffer *rx_bi; }; - DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS); + DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS); u16 queue_index; /* Queue number of ring */ u8 dcb_tc; /* Traffic class of ring */ u8 __iomem *tail; @@ -361,59 +360,59 @@ struct i40e_ring { u8 packet_stride; u16 flags; -#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) -#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) +#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0) +#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) /* stats structs */ - struct i40e_queue_stats stats; + struct iavf_queue_stats stats; struct u64_stats_sync syncp; union { - struct i40e_tx_queue_stats tx_stats; - struct i40e_rx_queue_stats rx_stats; + struct iavf_tx_queue_stats tx_stats; + struct iavf_rx_queue_stats rx_stats; }; unsigned int size; /* length of descriptor ring in bytes */ dma_addr_t dma; /* physical address of ring */ - struct i40e_vsi *vsi; /* Backreference to associated VSI */ - struct i40e_q_vector *q_vector; /* Backreference to associated vector */ + struct iavf_vsi *vsi; /* Backreference to associated VSI */ + struct iavf_q_vector *q_vector; /* Backreference to associated vector */ struct rcu_head rcu; /* to avoid race on free */ u16 next_to_alloc; - struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must + struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must * return before it sees the EOP for * the current packet, we save that skb * here and resume receiving this * packet the next time - * i40evf_clean_rx_ring_irq() is called + * iavf_clean_rx_ring_irq() is called * for this ring. 
*/ } ____cacheline_internodealigned_in_smp; -static inline bool ring_uses_build_skb(struct i40e_ring *ring) +static inline bool ring_uses_build_skb(struct iavf_ring *ring) { - return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED); + return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED); } -static inline void set_ring_build_skb_enabled(struct i40e_ring *ring) +static inline void set_ring_build_skb_enabled(struct iavf_ring *ring) { - ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED; + ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED; } -static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring) +static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring) { - ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED; + ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED; } -#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002 -#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002 -#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e -#define I40E_ITR_ADAPTIVE_LATENCY 0x8000 -#define I40E_ITR_ADAPTIVE_BULK 0x0000 -#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) +#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002 +#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002 +#define IAVF_ITR_ADAPTIVE_MAX_USECS 0x007e +#define IAVF_ITR_ADAPTIVE_LATENCY 0x8000 +#define IAVF_ITR_ADAPTIVE_BULK 0x0000 +#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY)) -struct i40e_ring_container { - struct i40e_ring *ring; /* pointer to linked list of ring(s) */ +struct iavf_ring_container { + struct iavf_ring *ring; /* pointer to linked list of ring(s) */ unsigned long next_update; /* jiffies value of next update */ unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ @@ -423,10 +422,10 @@ struct i40e_ring_container { }; /* iterator for handling rings in ring container */ -#define i40e_for_each_ring(pos, head) \ +#define iavf_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) +static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring) { #if (PAGE_SIZE < 8192) if (ring->rx_buf_len > (PAGE_SIZE / 2)) @@ -435,25 +434,25 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) return 0; } -#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring)) - -bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); -netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); -void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); -int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring); -int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); -void i40evf_free_tx_resources(struct i40e_ring *tx_ring); -void i40evf_free_rx_resources(struct i40e_ring *rx_ring); -int i40evf_napi_poll(struct napi_struct *napi, int budget); -void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); -u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); -void i40evf_detect_recover_hung(struct i40e_vsi *vsi); -int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); -bool __i40evf_chk_linearize(struct sk_buff *skb); +#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring)) + +bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count); +netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +void iavf_clean_tx_ring(struct iavf_ring *tx_ring); +void iavf_clean_rx_ring(struct iavf_ring *rx_ring); 
+int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring); +int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring); +void iavf_free_tx_resources(struct iavf_ring *tx_ring); +void iavf_free_rx_resources(struct iavf_ring *rx_ring); +int iavf_napi_poll(struct napi_struct *napi, int budget); +void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector); +u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw); +void iavf_detect_recover_hung(struct iavf_vsi *vsi); +int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size); +bool __iavf_chk_linearize(struct sk_buff *skb); /** - * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed + * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed * @skb: send buffer * @tx_ring: ring to send buffer on * @@ -461,14 +460,14 @@ bool __i40evf_chk_linearize(struct sk_buff *skb); * there is not enough descriptors available in this ring since we need at least * one descriptor. **/ -static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) +static inline int iavf_xmit_descriptor_count(struct sk_buff *skb) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; int count = 0, size = skb_headlen(skb); for (;;) { - count += i40e_txd_use_count(size); + count += iavf_txd_use_count(size); if (!nr_frags--) break; @@ -480,21 +479,21 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) } /** - * i40e_maybe_stop_tx - 1st level check for Tx stop conditions + * iavf_maybe_stop_tx - 1st level check for Tx stop conditions * @tx_ring: the ring to be checked * @size: the size buffer we want to assure is available * * Returns 0 if stop is not needed **/ -static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) { - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + if (likely(IAVF_DESC_UNUSED(tx_ring) >= size)) return 0; - return __i40evf_maybe_stop_tx(tx_ring, size); + return __iavf_maybe_stop_tx(tx_ring, size); } /** - * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * iavf_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @count: number of buffers used * @@ -502,23 +501,23 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. 
**/ -static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) +static inline bool iavf_chk_linearize(struct sk_buff *skb, int count) { /* Both TSO and single send will work if count is less than 8 */ - if (likely(count < I40E_MAX_BUFFER_TXD)) + if (likely(count < IAVF_MAX_BUFFER_TXD)) return false; if (skb_is_gso(skb)) - return __i40evf_chk_linearize(skb); + return __iavf_chk_linearize(skb); /* we can support up to 8 data buffers for a single send */ - return count != I40E_MAX_BUFFER_TXD; + return count != IAVF_MAX_BUFFER_TXD; } /** * @ring: Tx ring to find the netdev equivalent of **/ -static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring) +static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring) { return netdev_get_tx_queue(ring->netdev, ring->queue_index); } -#endif /* _I40E_TXRX_H_ */ +#endif /* _IAVF_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h new file mode 100644 index 000000000000..ca89583613fb --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -0,0 +1,688 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _IAVF_TYPE_H_ +#define _IAVF_TYPE_H_ + +#include "iavf_status.h" +#include "iavf_osdep.h" +#include "iavf_register.h" +#include "i40e_adminq.h" +#include "iavf_devids.h" + +#define IAVF_RXQ_CTX_DBUFF_SHIFT 7 + +/* IAVF_MASK is a macro used on 32 bit registers */ +#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift)) + +#define IAVF_MAX_VSI_QP 16 +#define IAVF_MAX_VF_VSI 3 +#define IAVF_MAX_CHAINED_RX_BUFFERS 5 + +/* forward declaration */ +struct iavf_hw; +typedef void (*I40E_ADMINQ_CALLBACK)(struct iavf_hw *, struct i40e_aq_desc *); + +/* Data type manipulation macros. */ + +#define IAVF_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define IAVF_QTX_CTL_VF_QUEUE 0x0 +#define IAVF_QTX_CTL_VM_QUEUE 0x1 +#define IAVF_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum iavf_debug_mask { + IAVF_DEBUG_INIT = 0x00000001, + IAVF_DEBUG_RELEASE = 0x00000002, + + IAVF_DEBUG_LINK = 0x00000010, + IAVF_DEBUG_PHY = 0x00000020, + IAVF_DEBUG_HMC = 0x00000040, + IAVF_DEBUG_NVM = 0x00000080, + IAVF_DEBUG_LAN = 0x00000100, + IAVF_DEBUG_FLOW = 0x00000200, + IAVF_DEBUG_DCB = 0x00000400, + IAVF_DEBUG_DIAG = 0x00000800, + IAVF_DEBUG_FD = 0x00001000, + IAVF_DEBUG_PACKAGE = 0x00002000, + + IAVF_DEBUG_AQ_MESSAGE = 0x01000000, + IAVF_DEBUG_AQ_DESCRIPTOR = 0x02000000, + IAVF_DEBUG_AQ_DESC_BUFFER = 0x04000000, + IAVF_DEBUG_AQ_COMMAND = 0x06000000, + IAVF_DEBUG_AQ = 0x0F000000, + + IAVF_DEBUG_USER = 0xF0000000, + + IAVF_DEBUG_ALL = 0xFFFFFFFF +}; + +/* These are structs for managing the hardware information and the operations. + * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. 
+ */ +enum iavf_mac_type { + IAVF_MAC_UNKNOWN = 0, + IAVF_MAC_XL710, + IAVF_MAC_VF, + IAVF_MAC_X722, + IAVF_MAC_X722_VF, + IAVF_MAC_GENERIC, +}; + +enum iavf_vsi_type { + IAVF_VSI_MAIN = 0, + IAVF_VSI_VMDQ1 = 1, + IAVF_VSI_VMDQ2 = 2, + IAVF_VSI_CTRL = 3, + IAVF_VSI_FCOE = 4, + IAVF_VSI_MIRROR = 5, + IAVF_VSI_SRIOV = 6, + IAVF_VSI_FDIR = 7, + IAVF_VSI_TYPE_UNKNOWN +}; + +enum iavf_queue_type { + IAVF_QUEUE_TYPE_RX = 0, + IAVF_QUEUE_TYPE_TX, + IAVF_QUEUE_TYPE_PE_CEQ, + IAVF_QUEUE_TYPE_UNKNOWN +}; + +#define IAVF_HW_CAP_MAX_GPIO 30 +/* Capabilities of a PF or a VF or the whole device */ +struct iavf_hw_capabilities { + bool dcb; + bool fcoe; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors_vf; +}; + +struct iavf_mac_info { + enum iavf_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +/* PCI bus types */ +enum iavf_bus_type { + iavf_bus_type_unknown = 0, + iavf_bus_type_pci, + iavf_bus_type_pcix, + iavf_bus_type_pci_express, + iavf_bus_type_reserved +}; + +/* PCI bus speeds */ +enum iavf_bus_speed { + iavf_bus_speed_unknown = 0, + iavf_bus_speed_33 = 33, + iavf_bus_speed_66 = 66, + iavf_bus_speed_100 = 100, + iavf_bus_speed_120 = 120, + iavf_bus_speed_133 = 133, + iavf_bus_speed_2500 = 2500, + iavf_bus_speed_5000 = 5000, + iavf_bus_speed_8000 = 8000, + iavf_bus_speed_reserved +}; + +/* PCI bus widths */ +enum iavf_bus_width { + iavf_bus_width_unknown = 0, + iavf_bus_width_pcie_x1 = 1, + iavf_bus_width_pcie_x2 = 2, + iavf_bus_width_pcie_x4 = 4, + iavf_bus_width_pcie_x8 = 8, + iavf_bus_width_32 = 32, + iavf_bus_width_64 = 64, + iavf_bus_width_reserved +}; + +/* Bus parameters */ +struct iavf_bus_info { + enum iavf_bus_speed speed; + enum iavf_bus_width width; + enum iavf_bus_type type; + + u16 func; + u16 device; + u16 lan_id; + u16 bus_id; +}; + +#define IAVF_MAX_USER_PRIORITY 8 +/* Port hardware description */ +struct iavf_hw { + u8 __iomem *hw_addr; + void *back; + + /* subsystem structs */ + struct iavf_mac_info mac; + struct iavf_bus_info bus; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + + /* capabilities for entire device and PCI func */ + struct iavf_hw_capabilities dev_caps; + + /* Admin Queue info */ + struct iavf_adminq_info aq; + + /* debug mask */ + u32 debug_mask; + char err_str[16]; +}; + +struct iavf_driver_version { + u8 major_version; + u8 minor_version; + u8 build_version; + u8 subbuild_version; + u8 driver_string[32]; +}; + +/* RX Descriptors */ +union iavf_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union iavf_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 
fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +enum iavf_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_DESC_STATUS_DD_SHIFT = 0, + IAVF_RX_DESC_STATUS_EOF_SHIFT = 1, + IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3, + IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4, + IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, + /* Note: Bit 8 is reserved in X710 and XL710 */ + IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, + IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + IAVF_RX_DESC_STATUS_FLM_SHIFT = 11, + IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14, + IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + /* Note: For non-tunnel packets INT_UDP_0 is the right status for + * UDP header + */ + IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, + IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */ +}; + +#define IAVF_RXD_QW1_STATUS_SHIFT 0 +#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \ + << IAVF_RXD_QW1_STATUS_SHIFT) + +#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT +#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT +#define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +enum iavf_rx_desc_fltstat_values { + IAVF_RX_DESC_FLTSTAT_NO_DATA = 0, + IAVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ + IAVF_RX_DESC_FLTSTAT_RSV = 2, + IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define IAVF_RXD_QW1_ERROR_SHIFT 19 +#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT) + +enum iavf_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_DESC_ERROR_RXE_SHIFT = 0, + IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1, + IAVF_RX_DESC_ERROR_HBO_SHIFT = 2, + IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + IAVF_RX_DESC_ERROR_IPE_SHIFT = 3, + IAVF_RX_DESC_ERROR_L4E_SHIFT = 4, + IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5, + IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, + IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7 +}; + +enum iavf_rx_desc_error_l3l4e_fcoe_masks { + IAVF_RX_DESC_ERROR_L3L4E_NONE = 0, + IAVF_RX_DESC_ERROR_L3L4E_PROT = 1, + IAVF_RX_DESC_ERROR_L3L4E_FC = 2, + IAVF_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define IAVF_RXD_QW1_PTYPE_SHIFT 30 +#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum iavf_rx_l2_ptype { + IAVF_RX_PTYPE_L2_RESERVED = 0, + IAVF_RX_PTYPE_L2_MAC_PAY2 = 1, + IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + IAVF_RX_PTYPE_L2_FIP_PAY2 = 3, + IAVF_RX_PTYPE_L2_OUI_PAY2 = 4, + IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6, + IAVF_RX_PTYPE_L2_ECP_PAY2 = 7, + IAVF_RX_PTYPE_L2_EVB_PAY2 = 8, + IAVF_RX_PTYPE_L2_QCN_PAY2 = 9, + IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10, + IAVF_RX_PTYPE_L2_ARP = 11, + IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12, + IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct iavf_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum iavf_rx_ptype_outer_ip { + IAVF_RX_PTYPE_OUTER_L2 = 0, + IAVF_RX_PTYPE_OUTER_IP = 1 +}; + +enum iavf_rx_ptype_outer_ip_ver { + IAVF_RX_PTYPE_OUTER_NONE = 0, + IAVF_RX_PTYPE_OUTER_IPV4 = 0, + IAVF_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum iavf_rx_ptype_outer_fragmented { + IAVF_RX_PTYPE_NOT_FRAG = 0, + IAVF_RX_PTYPE_FRAG = 1 +}; + +enum iavf_rx_ptype_tunnel_type { + IAVF_RX_PTYPE_TUNNEL_NONE = 0, + IAVF_RX_PTYPE_TUNNEL_IP_IP = 1, + IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum iavf_rx_ptype_tunnel_end_prot { + IAVF_RX_PTYPE_TUNNEL_END_NONE = 0, + IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1, + IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum iavf_rx_ptype_inner_prot { + IAVF_RX_PTYPE_INNER_PROT_NONE = 0, + IAVF_RX_PTYPE_INNER_PROT_UDP = 1, + IAVF_RX_PTYPE_INNER_PROT_TCP = 2, + IAVF_RX_PTYPE_INNER_PROT_SCTP = 3, + IAVF_RX_PTYPE_INNER_PROT_ICMP = 4, + IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum iavf_rx_ptype_payload_layer { + IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define IAVF_RXD_QW1_LENGTH_PBUF_MASK 
(0x3FFFULL << \ + IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + IAVF_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define IAVF_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define IAVF_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(IAVF_RXD_QW1_LENGTH_SPH_SHIFT) + +enum iavf_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + IAVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + IAVF_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + IAVF_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + IAVF_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +enum iavf_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + IAVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + IAVF_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + IAVF_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + IAVF_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + IAVF_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + IAVF_RX_DESC_PE_STATUS_URG_SHIFT = 27, + IAVF_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + IAVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define IAVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define IAVF_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum iavf_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + IAVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum iavf_rx_prog_status_desc_prog_id_masks { + IAVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum iavf_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + IAVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + IAVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + IAVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +/* TX Descriptor */ +struct iavf_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +#define IAVF_TXD_QW1_DTYPE_SHIFT 0 +#define IAVF_TXD_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT) + +enum iavf_tx_desc_dtype_value { + IAVF_TX_DESC_DTYPE_DATA = 0x0, + IAVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + IAVF_TX_DESC_DTYPE_CONTEXT = 0x1, + IAVF_TX_DESC_DTYPE_FCOE_CTX = 0x2, + IAVF_TX_DESC_DTYPE_FILTER_PROG = 0x8, + IAVF_TX_DESC_DTYPE_DDP_CTX = 0x9, + IAVF_TX_DESC_DTYPE_FLEX_DATA = 0xB, + IAVF_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + IAVF_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + IAVF_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define IAVF_TXD_QW1_CMD_SHIFT 4 +#define IAVF_TXD_QW1_CMD_MASK (0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT) + +enum iavf_tx_desc_cmd_bits { + IAVF_TX_DESC_CMD_EOP = 0x0001, + IAVF_TX_DESC_CMD_RS = 0x0002, + IAVF_TX_DESC_CMD_ICRC = 0x0004, + IAVF_TX_DESC_CMD_IL2TAG1 = 0x0008, + IAVF_TX_DESC_CMD_DUMMY = 0x0010, + IAVF_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + IAVF_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + IAVF_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + 
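For illustration, the Rx writeback descriptor packs status, error, packet type and buffer length into the single 64-bit qword1, and the driver recovers each field with the IAVF_RXD_QW1_* shift/mask pairs defined above. A standalone sketch (not part of the patch; the constants are restated as plain values and the sample descriptor word is made up):

#include <stdint.h>
#include <stdio.h>

#define QW1_STATUS_DD      (1ULL << 0)   /* IAVF_RX_DESC_STATUS_DD_SHIFT */
#define QW1_STATUS_EOF     (1ULL << 1)   /* IAVF_RX_DESC_STATUS_EOF_SHIFT */
#define QW1_ERROR_SHIFT    19
#define QW1_ERROR_MASK     (0xFFULL << QW1_ERROR_SHIFT)
#define QW1_PTYPE_SHIFT    30
#define QW1_PTYPE_MASK     (0xFFULL << QW1_PTYPE_SHIFT)
#define QW1_LEN_PBUF_SHIFT 38
#define QW1_LEN_PBUF_MASK  (0x3FFFULL << QW1_LEN_PBUF_SHIFT)

int main(void)
{
        /* Made-up writeback value: DD+EOF set, ptype 1 (MAC_PAY2), 60-byte packet */
        uint64_t qword1 = QW1_STATUS_DD | QW1_STATUS_EOF |
                          (1ULL << QW1_PTYPE_SHIFT) |
                          (60ULL << QW1_LEN_PBUF_SHIFT);

        if (!(qword1 & QW1_STATUS_DD))
                return 0;       /* descriptor not yet written back by hardware */

        unsigned int ptype = (qword1 & QW1_PTYPE_MASK) >> QW1_PTYPE_SHIFT;
        unsigned int len   = (qword1 & QW1_LEN_PBUF_MASK) >> QW1_LEN_PBUF_SHIFT;
        unsigned int errs  = (qword1 & QW1_ERROR_MASK) >> QW1_ERROR_SHIFT;

        printf("ptype=%u len=%u errors=0x%x\n", ptype, len, errs);
        return 0;
}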
IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + IAVF_TX_DESC_CMD_FCOET = 0x0080, + IAVF_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ + IAVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ +}; + +#define IAVF_TXD_QW1_OFFSET_SHIFT 16 +#define IAVF_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ + IAVF_TXD_QW1_OFFSET_SHIFT) + +enum iavf_tx_desc_length_fields { + /* Note: These are predefined bit offsets */ + IAVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ +}; + +#define IAVF_TXD_QW1_TX_BUF_SZ_SHIFT 34 +#define IAVF_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ + IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) + +#define IAVF_TXD_QW1_L2TAG1_SHIFT 48 +#define IAVF_TXD_QW1_L2TAG1_MASK (0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT) + +/* Context descriptors */ +struct iavf_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 type_cmd_tso_mss; +}; + +#define IAVF_TXD_CTX_QW1_CMD_SHIFT 4 +#define IAVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT) + +enum iavf_tx_ctx_desc_cmd_bits { + IAVF_TX_CTX_DESC_TSO = 0x01, + IAVF_TX_CTX_DESC_TSYN = 0x02, + IAVF_TX_CTX_DESC_IL2TAG2 = 0x04, + IAVF_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + IAVF_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + IAVF_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + IAVF_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + IAVF_TX_CTX_DESC_SWTCH_VSI = 0x30, + IAVF_TX_CTX_DESC_SWPE = 0x40 +}; + +/* Packet Classifier Types for filters */ +enum iavf_filter_pctype { + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. + */ + IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, + IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31, + IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, + IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36, + /* Note: Values 37-38 are reserved for future use. + * Value 39, 40, 42 are not supported on XL710 and X710. 
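The Tx data descriptor works the other way around: the driver composes cmd_type_offset_bsz from the dtype, command bits, offsets, buffer size and L2 tag using the IAVF_TXD_QW1_* shifts above. A minimal sketch of that packing, with the shift values restated so it compiles on its own (the field values in main() are examples only):

#include <stdint.h>
#include <stdio.h>

#define TXD_DTYPE_DATA   0x0ULL
#define TXD_CMD_SHIFT    4
#define TXD_CMD_EOP      0x0001ULL
#define TXD_CMD_RS       0x0002ULL
#define TXD_CMD_ICRC     0x0004ULL
#define TXD_OFFSET_SHIFT 16
#define TXD_BUF_SZ_SHIFT 34
#define TXD_L2TAG1_SHIFT 48

/* Pack the second qword of a data descriptor from its fields. */
static uint64_t build_cmd_type_offset_bsz(uint64_t cmd, uint64_t offset,
                                          uint64_t size, uint64_t l2tag1)
{
        return TXD_DTYPE_DATA |
               (cmd    << TXD_CMD_SHIFT) |
               (offset << TXD_OFFSET_SHIFT) |
               (size   << TXD_BUF_SZ_SHIFT) |
               (l2tag1 << TXD_L2TAG1_SHIFT);
}

int main(void)
{
        /* Last fragment of a frame: end-of-packet, report status, insert CRC. */
        uint64_t qw = build_cmd_type_offset_bsz(TXD_CMD_EOP | TXD_CMD_RS | TXD_CMD_ICRC,
                                                0, 1514, 0);

        printf("cmd_type_offset_bsz = 0x%016llx\n", (unsigned long long)qw);
        return 0;
}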
+ */ + IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, + IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, + IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + IAVF_FILTER_PCTYPE_FCOE_OX = 48, + IAVF_FILTER_PCTYPE_FCOE_RX = 49, + IAVF_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63, +}; + +#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30 +#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ + IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) + +#define IAVF_TXD_CTX_QW1_MSS_SHIFT 50 +#define IAVF_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ + IAVF_TXD_CTX_QW1_MSS_SHIFT) + +#define IAVF_TXD_CTX_QW1_VSI_SHIFT 50 +#define IAVF_TXD_CTX_QW1_VSI_MASK (0x1FFULL << IAVF_TXD_CTX_QW1_VSI_SHIFT) + +#define IAVF_TXD_CTX_QW0_EXT_IP_SHIFT 0 +#define IAVF_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ + IAVF_TXD_CTX_QW0_EXT_IP_SHIFT) + +enum iavf_tx_ctx_desc_eipt_offload { + IAVF_TX_CTX_EXT_IP_NONE = 0x0, + IAVF_TX_CTX_EXT_IP_IPV6 = 0x1, + IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + IAVF_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + +#define IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 +#define IAVF_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ + IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT) + +#define IAVF_TXD_CTX_QW0_NATT_SHIFT 9 +#define IAVF_TXD_CTX_QW0_NATT_MASK (0x3ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT) + +#define IAVF_TXD_CTX_UDP_TUNNELING BIT_ULL(IAVF_TXD_CTX_QW0_NATT_SHIFT) +#define IAVF_TXD_CTX_GRE_TUNNELING (0x2ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT) + +#define IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 +#define IAVF_TXD_CTX_QW0_EIP_NOINC_MASK \ + BIT_ULL(IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT) + +#define IAVF_TXD_CTX_EIP_NOINC_IPID_CONST IAVF_TXD_CTX_QW0_EIP_NOINC_MASK + +#define IAVF_TXD_CTX_QW0_NATLEN_SHIFT 12 +#define IAVF_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ + IAVF_TXD_CTX_QW0_NATLEN_SHIFT) + +#define IAVF_TXD_CTX_QW0_DECTTL_SHIFT 19 +#define IAVF_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ + IAVF_TXD_CTX_QW0_DECTTL_SHIFT) + +#define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define IAVF_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT) + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct iavf_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; +#endif /* _IAVF_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 565677de5ba3..e64751da0921 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1,16 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ -#include "i40evf.h" -#include "i40e_prototype.h" -#include "i40evf_client.h" +#include "iavf.h" +#include "iavf_prototype.h" +#include "iavf_client.h" /* busy wait delay in msec */ -#define I40EVF_BUSY_WAIT_DELAY 10 -#define I40EVF_BUSY_WAIT_COUNT 50 +#define IAVF_BUSY_WAIT_DELAY 10 +#define IAVF_BUSY_WAIT_COUNT 50 /** - * i40evf_send_pf_msg + * iavf_send_pf_msg * @adapter: adapter structure * @op: virtual channel opcode * @msg: pointer to message buffer @@ -18,44 +18,44 @@ * * Send message to PF and print status if failure. **/ -static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, - enum virtchnl_ops op, u8 *msg, u16 len) +static int iavf_send_pf_msg(struct iavf_adapter *adapter, + enum virtchnl_ops op, u8 *msg, u16 len) { - struct i40e_hw *hw = &adapter->hw; - i40e_status err; + struct iavf_hw *hw = &adapter->hw; + iavf_status err; - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */ - err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); + err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (err) dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", - op, i40evf_stat_str(hw, err), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + op, iavf_stat_str(hw, err), + iavf_aq_str(hw, hw->aq.asq_last_status)); return err; } /** - * i40evf_send_api_ver + * iavf_send_api_ver * @adapter: adapter structure * * Send API version admin queue message to the PF. The reply is not checked * in this function. Returns 0 if the message was successfully * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. **/ -int i40evf_send_api_ver(struct i40evf_adapter *adapter) +int iavf_send_api_ver(struct iavf_adapter *adapter) { struct virtchnl_version_info vvi; vvi.major = VIRTCHNL_VERSION_MAJOR; vvi.minor = VIRTCHNL_VERSION_MINOR; - return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, - sizeof(vvi)); + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, + sizeof(vvi)); } /** - * i40evf_verify_api_ver + * iavf_verify_api_ver * @adapter: adapter structure * * Compare API versions with the PF. Must be called after admin queue is @@ -63,15 +63,15 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter) * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors * from the firmware are propagated. **/ -int i40evf_verify_api_ver(struct i40evf_adapter *adapter) +int iavf_verify_api_ver(struct iavf_adapter *adapter) { struct virtchnl_version_info *pf_vvi; - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops op; - i40e_status err; + iavf_status err; - event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { err = -ENOMEM; @@ -79,8 +79,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) } while (1) { - err = i40evf_clean_arq_element(hw, &event, NULL); - /* When the AQ is empty, i40evf_clean_arq_element will return + err = iavf_clean_arq_element(hw, &event, NULL); + /* When the AQ is empty, iavf_clean_arq_element will return * nonzero and this loop will terminate. 
*/ if (err) @@ -92,7 +92,7 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) } - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + err = (iavf_status)le32_to_cpu(event.desc.cookie_low); if (err) goto out_alloc; @@ -118,14 +118,14 @@ out: } /** - * i40evf_send_vf_config_msg + * iavf_send_vf_config_msg * @adapter: adapter structure * * Send VF configuration request admin queue message to the PF. The reply * is not checked in this function. Returns 0 if the message was * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. **/ -int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) +int iavf_send_vf_config_msg(struct iavf_adapter *adapter) { u32 caps; @@ -142,19 +142,43 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_ADQ; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; - adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) - return i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_GET_VF_RESOURCES, - (u8 *)&caps, sizeof(caps)); + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, + (u8 *)&caps, sizeof(caps)); else - return i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_GET_VF_RESOURCES, - NULL, 0); + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); } /** - * i40evf_get_vf_config + * iavf_validate_num_queues + * @adapter: adapter structure + * + * Validate that the number of queues the PF has sent in + * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. + **/ +static void iavf_validate_num_queues(struct iavf_adapter *adapter) +{ + if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) { + struct virtchnl_vsi_resource *vsi_res; + int i; + + dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", + adapter->vf_res->num_queue_pairs, + IAVF_MAX_REQ_QUEUES); + dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", + IAVF_MAX_REQ_QUEUES); + adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + vsi_res = &adapter->vf_res->vsi_res[i]; + vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; + } + } +} + +/** + * iavf_get_vf_config * @adapter: private adapter structure * * Get VF configuration from PF and populate hw structure. Must be called after @@ -162,16 +186,16 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) * with maximum timeout. Response from PF is returned in the buffer for further * processing by the caller. **/ -int i40evf_get_vf_config(struct i40evf_adapter *adapter) +int iavf_get_vf_config(struct iavf_adapter *adapter) { - struct i40e_hw *hw = &adapter->hw; + struct iavf_hw *hw = &adapter->hw; struct i40e_arq_event_info event; enum virtchnl_ops op; - i40e_status err; + iavf_status err; u16 len; len = sizeof(struct virtchnl_vf_resource) + - I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); event.buf_len = len; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { @@ -180,10 +204,10 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) } while (1) { - /* When the AQ is empty, i40evf_clean_arq_element will return + /* When the AQ is empty, iavf_clean_arq_element will return * nonzero and this loop will terminate. 
*/ - err = i40evf_clean_arq_element(hw, &event, NULL); + err = iavf_clean_arq_element(hw, &event, NULL); if (err) goto out_alloc; op = @@ -192,10 +216,15 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) break; } - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + err = (iavf_status)le32_to_cpu(event.desc.cookie_low); memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); - i40e_vf_parse_hw_config(hw, adapter->vf_res); + /* some PFs send more queues than we should have so validate that + * we aren't getting too many queues + */ + if (!err) + iavf_validate_num_queues(adapter); + iavf_vf_parse_hw_config(hw, adapter->vf_res); out_alloc: kfree(event.msg_buf); out: @@ -203,17 +232,17 @@ out: } /** - * i40evf_configure_queues + * iavf_configure_queues * @adapter: adapter structure * * Request that the PF set up our (previously allocated) queues. **/ -void i40evf_configure_queues(struct i40evf_adapter *adapter) +void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; struct virtchnl_queue_pair_info *vqpi; int pairs = adapter->num_active_queues; - int i, len, max_frame = I40E_MAX_RXBUFFER; + int i, len, max_frame = IAVF_MAX_RXBUFFER; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -229,9 +258,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) return; /* Limit maximum frame size when jumbo frames is not enabled */ - if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) && + if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) && (adapter->netdev->mtu <= ETH_DATA_LEN)) - max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->num_queue_pairs = pairs; @@ -251,23 +280,23 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi->rxq.max_pkt_size = max_frame; vqpi->rxq.databuffer_size = ALIGN(adapter->rx_rings[i].rx_buf_len, - BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); + BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT)); vqpi++; } - adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, - (u8 *)vqci, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + (u8 *)vqci, len); kfree(vqci); } /** - * i40evf_enable_queues + * iavf_enable_queues * @adapter: adapter structure * * Request that the PF enable all of our queues. **/ -void i40evf_enable_queues(struct i40evf_adapter *adapter) +void iavf_enable_queues(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; @@ -281,18 +310,18 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); } /** - * i40evf_disable_queues + * iavf_disable_queues * @adapter: adapter structure * * Request that the PF disable all of our queues. 
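The queue enable/disable requests above select queues with a dense bitmap rather than a list: BIT(num_active_queues) - 1 covers queues 0 through num_active_queues - 1, and the same value is used for both the Tx and Rx maps. A tiny standalone illustration of that expression:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
        unsigned int num_active_queues = 4;     /* example value */

        /* Same expression as the virtchnl_queue_select fill above:
         * a contiguous bitmap selecting queues 0..num_active_queues-1.
         */
        uint32_t qmap = BIT(num_active_queues) - 1;

        printf("tx_queues = rx_queues = 0x%x\n", (unsigned)qmap);   /* 0xf */
        return 0;
}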
**/ -void i40evf_disable_queues(struct i40evf_adapter *adapter) +void iavf_disable_queues(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; @@ -306,24 +335,24 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); + adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); } /** - * i40evf_map_queues + * iavf_map_queues * @adapter: adapter structure * * Request that the PF map queues to interrupt vectors. Misc causes, including * admin queue, are always mapped to vector 0. **/ -void i40evf_map_queues(struct i40evf_adapter *adapter) +void iavf_map_queues(struct iavf_adapter *adapter) { struct virtchnl_irq_map_info *vimi; struct virtchnl_vector_map *vecmap; int v_idx, q_vectors, len; - struct i40e_q_vector *q_vector; + struct iavf_q_vector *q_vector; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -352,8 +381,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) vecmap->vector_id = v_idx + NONQ_VECS; vecmap->txq_map = q_vector->ring_mask; vecmap->rxq_map = q_vector->ring_mask; - vecmap->rxitr_idx = I40E_RX_ITR; - vecmap->txitr_idx = I40E_TX_ITR; + vecmap->rxitr_idx = IAVF_RX_ITR; + vecmap->txitr_idx = IAVF_TX_ITR; } /* Misc vector last - this is only for AdminQ messages */ vecmap = &vimi->vecmap[v_idx]; @@ -362,21 +391,21 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) vecmap->txq_map = 0; vecmap->rxq_map = 0; - adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, - (u8 *)vimi, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, + (u8 *)vimi, len); kfree(vimi); } /** - * i40evf_request_queues + * iavf_request_queues * @adapter: adapter structure * @num: number of requested queues * * We get a default number of queues from the PF. This enables us to request a * different number. Returns 0 on success, negative on failure **/ -int i40evf_request_queues(struct i40evf_adapter *adapter, int num) +int iavf_request_queues(struct iavf_adapter *adapter, int num) { struct virtchnl_vf_res_request vfres; @@ -390,22 +419,22 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num) vfres.num_queue_pairs = num; adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; - return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, - (u8 *)&vfres, sizeof(vfres)); + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, + (u8 *)&vfres, sizeof(vfres)); } /** - * i40evf_add_ether_addrs + * iavf_add_ether_addrs * @adapter: adapter structure * * Request that the PF add one or more addresses to our filters. 
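iavf_map_queues() above assigns each queue interrupt a vector id of v_idx + NONQ_VECS and reserves vector 0, with empty queue maps, for AdminQ/misc causes. A rough sketch of the resulting layout, assuming NONQ_VECS is 1 (its value is defined elsewhere in the driver, not in this hunk):

#include <stdio.h>

#define NONQ_VECS 1   /* assumption: one vector reserved for non-queue (AdminQ/misc) causes */

int main(void)
{
        int q_vectors = 4;      /* example: four queue interrupt vectors */
        int v_idx;

        /* Queue vectors are mapped starting after the reserved misc vector,
         * mirroring vecmap->vector_id = v_idx + NONQ_VECS above.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++)
                printf("q_vector %d -> MSI-X vector %d\n", v_idx, v_idx + NONQ_VECS);

        /* The final map entry covers vector 0 with empty queue maps: AdminQ only. */
        printf("AdminQ/misc -> MSI-X vector 0\n");
        return 0;
}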
**/ -void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) +void iavf_add_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; int len, i = 0, count = 0; - struct i40evf_mac_filter *f; + struct iavf_mac_filter *f; bool more = false; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -422,7 +451,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -430,9 +459,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_ether_addr_list) + (count * sizeof(struct virtchnl_ether_addr)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); len = sizeof(struct virtchnl_ether_addr_list) + @@ -458,25 +487,24 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, - (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len); kfree(veal); } /** - * i40evf_del_ether_addrs + * iavf_del_ether_addrs * @adapter: adapter structure * * Request that the PF remove one or more addresses from our filters. **/ -void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) +void iavf_del_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; - struct i40evf_mac_filter *f, *ftmp; + struct iavf_mac_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; @@ -494,7 +522,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -502,9 +530,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_ether_addr_list) + (count * sizeof(struct virtchnl_ether_addr)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); len = sizeof(struct virtchnl_ether_addr_list) + @@ -530,26 +558,25 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, - (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len); kfree(veal); } /** - * i40evf_add_vlans + * iavf_add_vlans * @adapter: adapter structure * * Request that the PF add one or more VLAN filters to our VSI. 
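The MAC (and, below, VLAN) filter helpers size their variable-length virtchnl message as header plus count * entry and clamp count when the result would exceed the admin queue buffer; anything that does not fit stays flagged in aq_required for a later message. A standalone sketch of that arithmetic, using stand-in structs and an assumed 4 KB buffer limit (the real limit comes from iavf.h):

#include <stdio.h>
#include <stddef.h>

#define MAX_AQ_BUF_SIZE 4096   /* assumed value for illustration */

struct ether_addr_entry { unsigned char addr[6]; };          /* stand-in for virtchnl_ether_addr */
struct ether_addr_list  { int vsi_id; int num_elements; };   /* stand-in header; entries follow */

int main(void)
{
        size_t count = 500;     /* pending MAC filters, example value */
        size_t len = sizeof(struct ether_addr_list) +
                     count * sizeof(struct ether_addr_entry);

        if (len > MAX_AQ_BUF_SIZE) {
                /* Same clamp as iavf_add_ether_addrs(): send what fits now,
                 * leave the AQ flag set so the remainder goes out later.
                 */
                count = (MAX_AQ_BUF_SIZE - sizeof(struct ether_addr_list)) /
                        sizeof(struct ether_addr_entry);
                len = sizeof(struct ether_addr_list) +
                      count * sizeof(struct ether_addr_entry);
        }

        printf("sending %zu filters in a %zu-byte message\n", count, len);
        return 0;
}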
**/ -void i40evf_add_vlans(struct i40evf_adapter *adapter) +void iavf_add_vlans(struct iavf_adapter *adapter) { struct virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; - struct i40evf_vlan_filter *f; + struct iavf_vlan_filter *f; bool more = false; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -566,7 +593,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -574,9 +601,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16); len = sizeof(struct virtchnl_vlan_filter_list) + @@ -601,24 +628,24 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } /** - * i40evf_del_vlans + * iavf_del_vlans * @adapter: adapter structure * * Request that the PF remove one or more VLAN filters from our VSI. **/ -void i40evf_del_vlans(struct i40evf_adapter *adapter) +void iavf_del_vlans(struct iavf_adapter *adapter) { struct virtchnl_vlan_filter_list *vvfl; - struct i40evf_vlan_filter *f, *ftmp; + struct iavf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; @@ -636,7 +663,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -644,9 +671,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16); len = sizeof(struct virtchnl_vlan_filter_list) + @@ -672,22 +699,22 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); } /** - * i40evf_set_promiscuous + * iavf_set_promiscuous * @adapter: adapter structure * @flags: bitmask to control unicast/multicast promiscuous. * * Request that the PF enable promiscuous mode for our VSI. 
**/ -void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) +void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) { struct virtchnl_promisc_info vpi; int promisc_all; @@ -702,39 +729,39 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) promisc_all = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC; if ((flags & promisc_all) == promisc_all) { - adapter->flags |= I40EVF_FLAG_PROMISC_ON; - adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; + adapter->flags |= IAVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC; dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); } if (flags & FLAG_VF_MULTICAST_PROMISC) { - adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; - adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + adapter->flags |= IAVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); } if (!flags) { - adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON | - I40EVF_FLAG_ALLMULTI_ON); - adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC | - I40EVF_FLAG_AQ_RELEASE_ALLMULTI); + adapter->flags &= ~(IAVF_FLAG_PROMISC_ON | + IAVF_FLAG_ALLMULTI_ON); + adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC | + IAVF_FLAG_AQ_RELEASE_ALLMULTI); dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); } adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - (u8 *)&vpi, sizeof(vpi)); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + (u8 *)&vpi, sizeof(vpi)); } /** - * i40evf_request_stats + * iavf_request_stats * @adapter: adapter structure * * Request VSI statistics from PF. 
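iavf_set_promiscuous() above distinguishes three cases from the flags argument: both unicast and multicast bits set means full promiscuous, the multicast bit alone means allmulti, and zero means leaving promiscuous mode. A small sketch of that decision order, with stand-in bit values (the real FLAG_VF_*_PROMISC values come from the virtchnl header):

#include <stdio.h>

/* Stand-in bit values for illustration only. */
#define UNICAST_PROMISC   0x1
#define MULTICAST_PROMISC 0x2

static const char *promisc_mode(int flags)
{
        int promisc_all = UNICAST_PROMISC | MULTICAST_PROMISC;

        if ((flags & promisc_all) == promisc_all)
                return "full promiscuous";
        if (flags & MULTICAST_PROMISC)
                return "multicast promiscuous only";
        if (!flags)
                return "promiscuous off";
        return "unicast promiscuous only";
}

int main(void)
{
        printf("%s\n", promisc_mode(UNICAST_PROMISC | MULTICAST_PROMISC));
        printf("%s\n", promisc_mode(MULTICAST_PROMISC));
        printf("%s\n", promisc_mode(0));
        return 0;
}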
**/ -void i40evf_request_stats(struct i40evf_adapter *adapter) +void iavf_request_stats(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; @@ -745,19 +772,19 @@ void i40evf_request_stats(struct i40evf_adapter *adapter) adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ - if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, - (u8 *)&vqs, sizeof(vqs))) + if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, + sizeof(vqs))) /* if the request failed, don't lock out others */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; } /** - * i40evf_get_hena + * iavf_get_hena * @adapter: adapter structure * * Request hash enable capabilities from PF **/ -void i40evf_get_hena(struct i40evf_adapter *adapter) +void iavf_get_hena(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -766,18 +793,17 @@ void i40evf_get_hena(struct i40evf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); } /** - * i40evf_set_hena + * iavf_set_hena * @adapter: adapter structure * * Request the PF to set our RSS hash capabilities **/ -void i40evf_set_hena(struct i40evf_adapter *adapter) +void iavf_set_hena(struct iavf_adapter *adapter) { struct virtchnl_rss_hena vrh; @@ -789,18 +815,18 @@ void i40evf_set_hena(struct i40evf_adapter *adapter) } vrh.hena = adapter->hena; adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, - (u8 *)&vrh, sizeof(vrh)); + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh, + sizeof(vrh)); } /** - * i40evf_set_rss_key + * iavf_set_rss_key * @adapter: adapter structure * * Request the PF to set our RSS hash key **/ -void i40evf_set_rss_key(struct i40evf_adapter *adapter) +void iavf_set_rss_key(struct iavf_adapter *adapter) { struct virtchnl_rss_key *vrk; int len; @@ -821,19 +847,18 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter) memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, - (u8 *)vrk, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len); kfree(vrk); } /** - * i40evf_set_rss_lut + * iavf_set_rss_lut * @adapter: adapter structure * * Request the PF to set our RSS lookup table **/ -void i40evf_set_rss_lut(struct i40evf_adapter *adapter) +void iavf_set_rss_lut(struct iavf_adapter *adapter) { struct virtchnl_rss_lut *vrl; int len; @@ -853,19 +878,18 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter) vrl->lut_entries = adapter->rss_lut_size; memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, - (u8 *)vrl, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len); 
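Every request helper in this file follows the same serialization pattern: bail if current_op is not VIRTCHNL_OP_UNKNOWN, otherwise record the opcode, clear the corresponding aq_required bit, and send the message; the completion handler later resets current_op. A compact model of that pattern (simplified types, not the driver's actual structures):

#include <stdio.h>
#include <stdbool.h>

enum op { OP_UNKNOWN = 0, OP_SET_RSS_KEY, OP_SET_RSS_LUT };

struct vf_adapter {
        enum op current_op;
        unsigned int aq_required;       /* bitmask of deferred requests */
};

#define AQ_SET_RSS_KEY 0x1
#define AQ_SET_RSS_LUT 0x2

static bool send_request(struct vf_adapter *a, enum op op, unsigned int aq_bit)
{
        if (a->current_op != OP_UNKNOWN)
                return false;           /* bail: a command is already pending */
        a->current_op = op;
        a->aq_required &= ~aq_bit;
        /* ... build and send the admin queue message here ... */
        return true;
}

int main(void)
{
        struct vf_adapter a = { OP_UNKNOWN, AQ_SET_RSS_KEY | AQ_SET_RSS_LUT };

        printf("key sent: %d\n", send_request(&a, OP_SET_RSS_KEY, AQ_SET_RSS_KEY));
        /* second request stays deferred until the first completion clears current_op */
        printf("lut sent: %d\n", send_request(&a, OP_SET_RSS_LUT, AQ_SET_RSS_LUT));
        return 0;
}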
kfree(vrl); } /** - * i40evf_enable_vlan_stripping + * iavf_enable_vlan_stripping * @adapter: adapter structure * * Request VLAN header stripping to be enabled **/ -void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter) +void iavf_enable_vlan_stripping(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -874,18 +898,17 @@ void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0); } /** - * i40evf_disable_vlan_stripping + * iavf_disable_vlan_stripping * @adapter: adapter structure * * Request VLAN header stripping to be disabled **/ -void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter) +void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -894,18 +917,17 @@ void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); } /** - * i40evf_print_link_message - print link up or down + * iavf_print_link_message - print link up or down * @adapter: adapter structure * * Log a message telling the world of our wonderous link status */ -static void i40evf_print_link_message(struct i40evf_adapter *adapter) +static void iavf_print_link_message(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; char *speed = "Unknown "; @@ -942,13 +964,13 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter) } /** - * i40evf_enable_channel + * iavf_enable_channel * @adapter: adapter structure * * Request that the PF enable channels as specified by * the user via tc tool. 
**/ -void i40evf_enable_channels(struct i40evf_adapter *adapter) +void iavf_enable_channels(struct iavf_adapter *adapter) { struct virtchnl_tc_info *vti = NULL; u16 len; @@ -976,22 +998,21 @@ void i40evf_enable_channels(struct i40evf_adapter *adapter) adapter->ch_config.ch_info[i].max_tx_rate; } - adapter->ch_config.state = __I40EVF_TC_RUNNING; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_RUNNING; + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, - (u8 *)vti, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len); kfree(vti); } /** - * i40evf_disable_channel + * iavf_disable_channel * @adapter: adapter structure * * Request that the PF disable channels that are configured **/ -void i40evf_disable_channels(struct i40evf_adapter *adapter) +void iavf_disable_channels(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -1000,23 +1021,22 @@ void i40evf_disable_channels(struct i40evf_adapter *adapter) return; } - adapter->ch_config.state = __I40EVF_TC_INVALID; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_INVALID; + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0); } /** - * i40evf_print_cloud_filter + * iavf_print_cloud_filter * @adapter: adapter structure * @f: cloud filter to print * * Print the cloud filter **/ -static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter, - struct virtchnl_filter *f) +static void iavf_print_cloud_filter(struct iavf_adapter *adapter, + struct virtchnl_filter *f) { switch (f->flow_type) { case VIRTCHNL_TCP_V4_FLOW: @@ -1043,15 +1063,15 @@ static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter, } /** - * i40evf_add_cloud_filter + * iavf_add_cloud_filter * @adapter: adapter structure * * Request that the PF add cloud filters as specified * by the user via tc tool. 
**/ -void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) +void iavf_add_cloud_filter(struct iavf_adapter *adapter) { - struct i40evf_cloud_filter *cf; + struct iavf_cloud_filter *cf; struct virtchnl_filter *f; int len = 0, count = 0; @@ -1068,7 +1088,7 @@ void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) } } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; return; } adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; @@ -1082,25 +1102,24 @@ void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) if (cf->add) { memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); cf->add = false; - cf->state = __I40EVF_CF_ADD_PENDING; - i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_ADD_CLOUD_FILTER, - (u8 *)f, len); + cf->state = __IAVF_CF_ADD_PENDING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, + (u8 *)f, len); } } kfree(f); } /** - * i40evf_del_cloud_filter + * iavf_del_cloud_filter * @adapter: adapter structure * * Request that the PF delete cloud filters as specified * by the user via tc tool. **/ -void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) +void iavf_del_cloud_filter(struct iavf_adapter *adapter) { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; struct virtchnl_filter *f; int len = 0, count = 0; @@ -1117,7 +1136,7 @@ void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) } } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; return; } adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; @@ -1131,30 +1150,29 @@ void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) if (cf->del) { memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); cf->del = false; - cf->state = __I40EVF_CF_DEL_PENDING; - i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_DEL_CLOUD_FILTER, - (u8 *)f, len); + cf->state = __IAVF_CF_DEL_PENDING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, + (u8 *)f, len); } } kfree(f); } /** - * i40evf_request_reset + * iavf_request_reset * @adapter: adapter structure * * Request that the PF reset this VF. No response is expected. **/ -void i40evf_request_reset(struct i40evf_adapter *adapter) +void iavf_request_reset(struct iavf_adapter *adapter) { /* Don't check CURRENT_OP - this is always higher priority */ - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); adapter->current_op = VIRTCHNL_OP_UNKNOWN; } /** - * i40evf_virtchnl_completion + * iavf_virtchnl_completion * @adapter: adapter structure * @v_opcode: opcode sent by PF * @v_retval: retval sent by PF @@ -1165,10 +1183,9 @@ void i40evf_request_reset(struct i40evf_adapter *adapter) * wait, we fire off our requests and assume that no errors will be returned. * This function handles the reply messages. 
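Taken together with the completion handling further down, the cloud filter helpers implement a small per-filter state machine: an add puts the filter into ADD_PENDING and a successful reply makes it ACTIVE (a failed add is freed), while a delete puts it into DEL_PENDING and a failed delete returns it to ACTIVE. A compact sketch of those transitions (illustrative only):

#include <stdio.h>

enum cf_state { CF_INVALID, CF_ADD_PENDING, CF_ACTIVE, CF_DEL_PENDING };

/* State reached once the PF answers the pending add/delete request. */
static enum cf_state cf_complete(enum cf_state s, int pf_ok)
{
        switch (s) {
        case CF_ADD_PENDING:
                return pf_ok ? CF_ACTIVE : CF_INVALID;   /* failed adds are freed */
        case CF_DEL_PENDING:
                return pf_ok ? CF_INVALID : CF_ACTIVE;   /* failed deletes stay active */
        default:
                return s;
        }
}

int main(void)
{
        enum cf_state s = CF_ADD_PENDING;

        s = cf_complete(s, 1);                  /* PF accepted the filter */
        printf("after add: %d (2 == active)\n", s);
        s = CF_DEL_PENDING;
        s = cf_complete(s, 0);                  /* PF rejected the delete */
        printf("after failed delete: %d (2 == active)\n", s);
        return 0;
}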
**/ -void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen) +void iavf_virtchnl_completion(struct iavf_adapter *adapter, + enum virtchnl_ops v_opcode, iavf_status v_retval, + u8 *msg, u16 msglen) { struct net_device *netdev = adapter->netdev; @@ -1176,6 +1193,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; bool link_up = vpe->event_data.link_event.link_status; + switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: adapter->link_speed = @@ -1193,7 +1211,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, * after we enable queues and actually prepared * to send traffic. */ - if (adapter->state != __I40EVF_RUNNING) + if (adapter->state != __IAVF_RUNNING) break; /* For ADq enabled VF, we reconfigure VSIs and @@ -1201,7 +1219,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, * queues are enabled. */ if (adapter->flags & - I40EVF_FLAG_QUEUES_DISABLED) + IAVF_FLAG_QUEUES_DISABLED) break; } @@ -1213,12 +1231,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); } - i40evf_print_link_message(adapter); + iavf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { - adapter->flags |= I40EVF_FLAG_RESET_PENDING; + if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { + adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); schedule_work(&adapter->reset_task); } @@ -1234,48 +1252,48 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, switch (v_opcode) { case VIRTCHNL_OP_ADD_VLAN: dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_DEL_VLAN: dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_DEL_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_ENABLE_CHANNELS: dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->ch_config.state = __I40EVF_TC_INVALID; + iavf_stat_str(&adapter->hw, v_retval)); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_INVALID; netdev_reset_tc(netdev); netif_tx_start_all_queues(netdev); break; case VIRTCHNL_OP_DISABLE_CHANNELS: dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->ch_config.state = __I40EVF_TC_RUNNING; + iavf_stat_str(&adapter->hw, v_retval)); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_RUNNING; netif_tx_start_all_queues(netdev); break; case 
VIRTCHNL_OP_ADD_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_ADD_PENDING) { - cf->state = __I40EVF_CF_INVALID; + if (cf->state == __IAVF_CF_ADD_PENDING) { + cf->state = __IAVF_CF_INVALID; dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", - i40evf_stat_str(&adapter->hw, - v_retval)); - i40evf_print_cloud_filter(adapter, - &cf->f); + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_cloud_filter(adapter, + &cf->f); list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; @@ -1284,32 +1302,31 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } break; case VIRTCHNL_OP_DEL_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf; + struct iavf_cloud_filter *cf; list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_DEL_PENDING) { - cf->state = __I40EVF_CF_ACTIVE; + if (cf->state == __IAVF_CF_DEL_PENDING) { + cf->state = __IAVF_CF_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", - i40evf_stat_str(&adapter->hw, - v_retval)); - i40evf_print_cloud_filter(adapter, - &cf->f); + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_cloud_filter(adapter, + &cf->f); } } } break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", - v_retval, - i40evf_stat_str(&adapter->hw, v_retval), + v_retval, iavf_stat_str(&adapter->hw, v_retval), v_opcode); } } switch (v_opcode) { case VIRTCHNL_OP_GET_STATS: { - struct i40e_eth_stats *stats = - (struct i40e_eth_stats *)msg; + struct iavf_eth_stats *stats = + (struct iavf_eth_stats *)msg; netdev->stats.rx_packets = stats->rx_unicast + stats->rx_multicast + stats->rx_broadcast; @@ -1326,25 +1343,33 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, break; case VIRTCHNL_OP_GET_VF_RESOURCES: { u16 len = sizeof(struct virtchnl_vf_resource) + - I40E_MAX_VF_VSI * + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); memcpy(adapter->vf_res, msg, min(msglen, len)); - i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); - /* restore current mac address */ - ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); - i40evf_process_config(adapter); + iavf_validate_num_queues(adapter); + iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + if (is_zero_ether_addr(adapter->hw.mac.addr)) { + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + /* refresh current mac address if changed */ + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, + adapter->hw.mac.addr); + } + iavf_process_config(adapter); } break; case VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ - i40evf_irq_enable(adapter, true); - adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED; + iavf_irq_enable(adapter, true); + adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; break; case VIRTCHNL_OP_DISABLE_QUEUES: - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - if (adapter->state == __I40EVF_DOWN_PENDING) { - adapter->state = __I40EVF_DOWN; + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + if (adapter->state == __IAVF_DOWN_PENDING) { + adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); } break; @@ -1363,8 +1388,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, * care about that. 
*/ if (msglen && CLIENT_ENABLED(adapter)) - i40evf_notify_client_message(&adapter->vsi, - msg, msglen); + iavf_notify_client_message(&adapter->vsi, msg, msglen); break; case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: @@ -1373,6 +1397,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; + if (msglen == sizeof(*vrh)) adapter->hena = vrh->hena; else @@ -1383,32 +1408,33 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, case VIRTCHNL_OP_REQUEST_QUEUES: { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; + if (vfres->num_queue_pairs != adapter->num_req_queues) { dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", adapter->num_req_queues, vfres->num_queue_pairs); adapter->num_req_queues = 0; - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; } } break; case VIRTCHNL_OP_ADD_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf; + struct iavf_cloud_filter *cf; list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_ADD_PENDING) - cf->state = __I40EVF_CF_ACTIVE; + if (cf->state == __IAVF_CF_ADD_PENDING) + cf->state = __IAVF_CF_ACTIVE; } } break; case VIRTCHNL_OP_DEL_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_DEL_PENDING) { - cf->state = __I40EVF_CF_INVALID; + if (cf->state == __IAVF_CF_DEL_PENDING) { + cf->state = __IAVF_CF_INVALID; list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 4058673fd853..e5d6f684437e 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -13,5 +13,7 @@ ice-y := ice_main.o \ ice_nvm.o \ ice_switch.o \ ice_sched.o \ + ice_lib.o \ ice_txrx.o \ ice_ethtool.o +ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 868f4a1d0f72..4c4b5717a627 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -28,6 +28,7 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/if_bridge.h> +#include <linux/avf/virtchnl.h> #include <net/ipv6.h> #include "ice_devids.h" #include "ice_type.h" @@ -35,17 +36,20 @@ #include "ice_switch.h" #include "ice_common.h" #include "ice_sched.h" +#include "ice_virtchnl_pf.h" +#include "ice_sriov.h" extern const char ice_drv_ver[]; #define ICE_BAR0 0 #define ICE_DFLT_NUM_DESC 128 -#define ICE_MIN_NUM_DESC 8 -#define ICE_MAX_NUM_DESC 8160 #define ICE_REQ_DESC_MULTIPLE 32 +#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE +#define ICE_MAX_NUM_DESC 8160 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_ETHTOOL_FWVER_LEN 32 #define ICE_AQ_LEN 64 +#define ICE_MBXQ_LEN 64 #define ICE_MIN_MSIX 2 #define ICE_NO_VSI 0xffff #define ICE_MAX_VSI_ALLOC 130 @@ -62,6 +66,15 @@ extern const char ice_drv_ver[]; #define ICE_RES_VALID_BIT 0x8000 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) #define ICE_INVAL_Q_INDEX 0xffff +#define ICE_INVAL_VFID 256 +#define ICE_MAX_VF_COUNT 256 +#define ICE_MAX_QS_PER_VF 256 +#define ICE_MIN_QS_PER_VF 1 +#define ICE_DFLT_QS_PER_VF 4 +#define ICE_MAX_BASE_QS_PER_VF 16 +#define 
ICE_MAX_INTR_PER_VF 65 +#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) +#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) @@ -122,7 +135,8 @@ struct ice_sw { enum ice_state { __ICE_DOWN, __ICE_NEEDS_RESTART, - __ICE_RESET_RECOVERY_PENDING, /* set by driver when reset starts */ + __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ + __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ __ICE_PFR_REQ, /* set by driver and peers */ __ICE_CORER_REQ, /* set by driver and peers */ __ICE_GLOBR_REQ, /* set by driver and peers */ @@ -131,10 +145,24 @@ enum ice_state { __ICE_EMPR_RECV, /* set by OICR handler */ __ICE_SUSPENDED, /* set on module remove path */ __ICE_RESET_FAILED, /* set by reset/rebuild */ + /* When checking for the PF to be in a nominal operating state, the + * bits that are grouped at the beginning of the list need to be + * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will + * be checked. If you need to add a bit into consideration for nominal + * operating state, it must be added before + * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position + * without appropriate consideration. + */ + __ICE_STATE_NOMINAL_CHECK_BITS, __ICE_ADMINQ_EVENT_PENDING, + __ICE_MAILBOXQ_EVENT_PENDING, + __ICE_MDD_EVENT_PENDING, + __ICE_VFLR_EVENT_PENDING, __ICE_FLTR_OVERFLOW_PROMISC, + __ICE_VF_DIS, __ICE_CFG_BUSY, __ICE_SERVICE_SCHED, + __ICE_SERVICE_DIS, __ICE_STATE_NBITS /* must be last */ }; @@ -168,7 +196,8 @@ struct ice_vsi { u32 rx_buf_failed; u32 rx_page_failed; int num_q_vectors; - int base_vector; + int sw_base_vector; /* Irq base for OS reserved vectors */ + int hw_base_vector; /* HW (absolute) index of a vector */ enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ @@ -176,6 +205,8 @@ struct ice_vsi { /* Interrupt thresholds */ u16 work_lmt; + s16 vf_id; /* VF ID for SR-IOV VSIs */ + /* RSS config */ u16 rss_table_size; /* HW RSS table size */ u16 rss_size; /* Allocated RSS queues */ @@ -225,21 +256,39 @@ struct ice_q_vector { u8 num_ring_tx; /* total number of tx rings in vector */ u8 num_ring_rx; /* total number of rx rings in vector */ char name[ICE_INT_NAME_STR_LEN]; + /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this + * value to the device + */ + u8 intrl; } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { ICE_FLAG_MSIX_ENA, ICE_FLAG_FLTR_SYNC, ICE_FLAG_RSS_ENA, + ICE_FLAG_SRIOV_ENA, + ICE_FLAG_SRIOV_CAPABLE, ICE_PF_FLAGS_NBITS /* must be last */ }; struct ice_pf { struct pci_dev *pdev; + + /* OS reserved IRQ details */ struct msix_entry *msix_entries; - struct ice_res_tracker *irq_tracker; + struct ice_res_tracker *sw_irq_tracker; + + /* HW reserved Interrupts for this PF */ + struct ice_res_tracker *hw_irq_tracker; + struct ice_vsi **vsi; /* VSIs created by the driver */ struct ice_sw *first_sw; /* first switch created by firmware */ + /* Virtchnl/SR-IOV config info */ + struct ice_vf *vf; + int num_alloc_vfs; /* actual number of VFs allocated */ + u16 num_vfs_supported; /* num VFs supported for this PF */ + u16 num_vf_qps; /* num queue pairs per VF */ + u16 num_vf_msix; /* num vectors per VF */ DECLARE_BITMAP(state, __ICE_STATE_NBITS); DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS); DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS); @@ -252,9 +301,11 @@ struct ice_pf { struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ u32 msg_enable; u32 hw_csum_rx_error; - u32 
oicr_idx; /* Other interrupt cause vector index */ + u32 sw_oicr_idx; /* Other interrupt cause SW vector index */ + u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ + u32 hw_oicr_idx; /* Other interrupt cause vector HW index */ + u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */ u32 num_lan_msix; /* Total MSIX vectors for base driver */ - u32 num_avail_msix; /* remaining MSIX vectors left unclaimed */ u16 num_lan_tx; /* num lan tx queues setup */ u16 num_lan_rx; /* num lan rx queues setup */ u16 q_left_tx; /* remaining num tx queues left unclaimed */ @@ -270,6 +321,9 @@ struct ice_pf { struct ice_hw_port_stats stats_prev; struct ice_hw hw; u8 stat_prev_loaded; /* has previous stats been loaded */ + u32 tx_timeout_count; + unsigned long tx_timeout_last_recovery; + u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; }; @@ -286,8 +340,8 @@ struct ice_netdev_priv { static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, struct ice_q_vector *q_vector) { - u32 vector = (vsi && q_vector) ? vsi->base_vector + q_vector->v_idx : - ((struct ice_pf *)hw->back)->oicr_idx; + u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx : + ((struct ice_pf *)hw->back)->hw_oicr_idx; int itr = ICE_ITR_NONE; u32 val; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index a0614f472658..6653555f55dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -87,6 +87,8 @@ struct ice_aqc_list_caps { /* Device/Function buffer entry, repeated per reported capability */ struct ice_aqc_list_caps_elem { __le16 cap; +#define ICE_AQC_CAPS_SRIOV 0x0012 +#define ICE_AQC_CAPS_VF 0x0013 #define ICE_AQC_CAPS_VSI 0x0017 #define ICE_AQC_CAPS_RSS 0x0040 #define ICE_AQC_CAPS_RXQS 0x0041 @@ -443,6 +445,8 @@ struct ice_aqc_vsi_props { u8 reserved[24]; }; +#define ICE_MAX_NUM_RECIPES 64 + /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ struct ice_aqc_sw_rules { @@ -734,6 +738,10 @@ struct ice_aqc_add_elem { struct ice_aqc_txsched_elem_data generic[1]; }; +struct ice_aqc_get_elem { + struct ice_aqc_txsched_elem_data generic[1]; +}; + struct ice_aqc_get_topo_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; struct ice_aqc_txsched_elem_data @@ -771,9 +779,8 @@ struct ice_aqc_layer_props { u8 chunk_size; __le16 max_device_nodes; __le16 max_pf_nodes; - u8 rsvd0[2]; - __le16 max_shared_rate_lmtr; - __le16 max_children; + u8 rsvd0[4]; + __le16 max_sibl_grp_sz; __le16 max_cir_rl_profiles; __le16 max_eir_rl_profiles; __le16 max_srl_profiles; @@ -919,9 +926,11 @@ struct ice_aqc_set_phy_cfg_data { u8 caps; #define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) #define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) -#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) -#define ICE_AQ_PHY_ENA_LINK BIT(3) -#define ICE_AQ_PHY_ENA_ATOMIC_LINK BIT(5) +#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) +#define ICE_AQ_PHY_ENA_LINK BIT(3) +#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) +#define ICE_AQ_PHY_ENA_LESM BIT(6) +#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7) u8 low_power_ctrl; __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ __le16 eeer_value; @@ -1068,6 +1077,19 @@ struct ice_aqc_nvm { __le32 addr_low; }; +/** + * Send to PF command (indirect 0x0801) id is only used by PF + * + * Send to VF command (indirect 0x0802) id is only used by PF + * + */ +struct ice_aqc_pf_vf_msg { + __le32 id; + u32 reserved; + __le32 addr_high; + __le32 
addr_low; +}; + /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ struct ice_aqc_get_set_rss_key { #define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) @@ -1203,6 +1225,84 @@ struct ice_aqc_dis_txq { struct ice_aqc_dis_txq_item qgrps[1]; }; +/* Configure Firmware Logging Command (indirect 0xFF09) + * Logging Information Read Response (indirect 0xFF10) + * Note: The 0xFF10 command has no input parameters. + */ +struct ice_aqc_fw_logging { + u8 log_ctrl; +#define ICE_AQC_FW_LOG_AQ_EN BIT(0) +#define ICE_AQC_FW_LOG_UART_EN BIT(1) + u8 rsvd0; + u8 log_ctrl_valid; /* Not used by 0xFF10 Response */ +#define ICE_AQC_FW_LOG_AQ_VALID BIT(0) +#define ICE_AQC_FW_LOG_UART_VALID BIT(1) + u8 rsvd1[5]; + __le32 addr_high; + __le32 addr_low; +}; + +enum ice_aqc_fw_logging_mod { + ICE_AQC_FW_LOG_ID_GENERAL = 0, + ICE_AQC_FW_LOG_ID_CTRL, + ICE_AQC_FW_LOG_ID_LINK, + ICE_AQC_FW_LOG_ID_LINK_TOPO, + ICE_AQC_FW_LOG_ID_DNL, + ICE_AQC_FW_LOG_ID_I2C, + ICE_AQC_FW_LOG_ID_SDP, + ICE_AQC_FW_LOG_ID_MDIO, + ICE_AQC_FW_LOG_ID_ADMINQ, + ICE_AQC_FW_LOG_ID_HDMA, + ICE_AQC_FW_LOG_ID_LLDP, + ICE_AQC_FW_LOG_ID_DCBX, + ICE_AQC_FW_LOG_ID_DCB, + ICE_AQC_FW_LOG_ID_NETPROXY, + ICE_AQC_FW_LOG_ID_NVM, + ICE_AQC_FW_LOG_ID_AUTH, + ICE_AQC_FW_LOG_ID_VPD, + ICE_AQC_FW_LOG_ID_IOSF, + ICE_AQC_FW_LOG_ID_PARSER, + ICE_AQC_FW_LOG_ID_SW, + ICE_AQC_FW_LOG_ID_SCHEDULER, + ICE_AQC_FW_LOG_ID_TXQ, + ICE_AQC_FW_LOG_ID_RSVD, + ICE_AQC_FW_LOG_ID_POST, + ICE_AQC_FW_LOG_ID_WATCHDOG, + ICE_AQC_FW_LOG_ID_TASK_DISPATCH, + ICE_AQC_FW_LOG_ID_MNG, + ICE_AQC_FW_LOG_ID_MAX, +}; + +/* This is the buffer for both of the logging commands. + * The entry array size depends on the datalen parameter in the descriptor. + * There will be a total of datalen / 2 entries. + */ +struct ice_aqc_fw_logging_data { + __le16 entry[1]; +#define ICE_AQC_FW_LOG_ID_S 0 +#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S) + +#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */ +#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */ + +#define ICE_AQC_FW_LOG_EN_S 12 +#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S) +#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */ +#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */ +#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */ +#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */ +}; + +/* Get/Clear FW Log (indirect 0xFF11) */ +struct ice_aqc_get_clear_fw_log { + u8 flags; +#define ICE_AQC_FW_LOG_CLEAR BIT(0) +#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1) + u8 rsvd1[7]; + __le32 addr_high; + __le32 addr_low; +}; + /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -1247,11 +1347,15 @@ struct ice_aq_desc { struct ice_aqc_query_txsched_res query_sched_res; struct ice_aqc_add_move_delete_elem add_move_delete_elem; struct ice_aqc_nvm nvm; + struct ice_aqc_pf_vf_msg virt; struct ice_aqc_get_set_rss_lut get_set_rss_lut; struct ice_aqc_get_set_rss_key get_set_rss_key; struct ice_aqc_add_txqs add_txqs; struct ice_aqc_dis_txqs dis_txqs; struct ice_aqc_add_get_update_free_vsi vsi_cmd; + struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; + struct ice_aqc_fw_logging fw_logging; + struct ice_aqc_get_clear_fw_log get_clear_fw_log; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_event_mask set_event_mask; struct ice_aqc_get_link_status get_link_status; @@ -1325,6 +1429,7 @@ enum ice_adminq_opc { /* transmit scheduler commands */ ice_aqc_opc_get_dflt_topo = 0x0400, ice_aqc_opc_add_sched_elems = 0x0401, + 
ice_aqc_opc_get_sched_elems = 0x0404, ice_aqc_opc_suspend_sched_elems = 0x0409, ice_aqc_opc_resume_sched_elems = 0x040A, ice_aqc_opc_delete_sched_elems = 0x040F, @@ -1340,6 +1445,10 @@ enum ice_adminq_opc { /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, + /* PF/VF mailbox commands */ + ice_mbx_opc_send_msg_to_pf = 0x0801, + ice_mbx_opc_send_msg_to_vf = 0x0802, + /* RSS commands */ ice_aqc_opc_set_rss_key = 0x0B02, ice_aqc_opc_set_rss_lut = 0x0B03, @@ -1349,6 +1458,9 @@ enum ice_adminq_opc { /* TX queue handling commands/events */ ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, + + /* debug commands */ + ice_aqc_opc_fw_logging = 0xFF09, }; #endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 661beea6af79..c52f450f2c0d 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -7,16 +7,16 @@ #define ICE_PF_RESET_WAIT_COUNT 200 -#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \ - wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \ +#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \ + wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \ ((ICE_RX_OPC_MDID << \ GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \ GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \ (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \ GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M)) -#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \ - wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \ +#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \ + wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \ (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \ GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \ (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \ @@ -125,7 +125,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, * * Returns the various PHY capabilities supported on the Port (0x0600) */ -static enum ice_status +enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd) @@ -290,30 +290,85 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, } /** - * ice_init_flex_parser - initialize rx flex parser + * ice_init_flex_flags * @hw: pointer to the hardware structure + * @prof_id: Rx Descriptor Builder profile ID * - * Function to initialize flex descriptors + * Function to initialize Rx flex flags */ -static void ice_init_flex_parser(struct ice_hw *hw) +static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id) { u8 idx = 0; - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0); - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1); - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2); - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, - ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100, - idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN, - ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx); + /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout: + * flexiflags0[5:0] - TCP flags, is_packet_fragmented, 
is_packet_UDP_GRE + * flexiflags1[3:0] - Not used for flag programming + * flexiflags2[7:0] - Tunnel and VLAN types + * 2 invalid fields in last index + */ + switch (prof_id) { + /* Rx flex flags are currently programmed for the NIC profiles only. + * Different flag bit programming configurations can be added per + * profile as needed. + */ + case ICE_RXDID_FLEX_NIC: + case ICE_RXDID_FLEX_NIC_2: + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG, + ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI, + ICE_RXFLG_FIN, idx++); + /* flex flag 1 is not used for flexi-flag programming, skipping + * these four FLG64 bits. + */ + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100, + ICE_RXFLG_EVLAN_x9100, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100, + ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC, + ICE_RXFLG_TNL0, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx); + break; + + default: + ice_debug(hw, ICE_DBG_INIT, + "Flag programming for profile ID %d not supported\n", + prof_id); + } +} + +/** + * ice_init_flex_flds + * @hw: pointer to the hardware structure + * @prof_id: Rx Descriptor Builder profile ID + * + * Function to initialize flex descriptors + */ +static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id) +{ + enum ice_flex_rx_mdid mdid; + + switch (prof_id) { + case ICE_RXDID_FLEX_NIC: + case ICE_RXDID_FLEX_NIC_2: + ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0); + ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1); + ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2); + + mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ? 
+ ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH; + + ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3); + + ice_init_flex_flags(hw, prof_id); + break; + + default: + ice_debug(hw, ICE_DBG_INIT, + "Field init for profile ID %d not supported\n", + prof_id); + } } /** @@ -333,20 +388,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); - mutex_init(&sw->mac_list_lock); - INIT_LIST_HEAD(&sw->mac_list_head); - - mutex_init(&sw->vlan_list_lock); - INIT_LIST_HEAD(&sw->vlan_list_head); - - mutex_init(&sw->eth_m_list_lock); - INIT_LIST_HEAD(&sw->eth_m_list_head); - - mutex_init(&sw->promisc_list_lock); - INIT_LIST_HEAD(&sw->promisc_list_head); - - mutex_init(&sw->mac_vlan_list_lock); - INIT_LIST_HEAD(&sw->mac_vlan_list_head); + ice_init_def_sw_recp(hw); return 0; } @@ -360,20 +402,232 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) struct ice_switch_info *sw = hw->switch_info; struct ice_vsi_list_map_info *v_pos_map; struct ice_vsi_list_map_info *v_tmp_map; + struct ice_sw_recipe *recps; + u8 i; list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, list_entry) { list_del(&v_pos_map->list_entry); devm_kfree(ice_hw_to_dev(hw), v_pos_map); } + recps = hw->switch_info->recp_list; + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; + + recps[i].root_rid = i; + mutex_destroy(&recps[i].filt_rule_lock); + list_for_each_entry_safe(lst_itr, tmp_entry, + &recps[i].filt_rules, list_entry) { + list_del(&lst_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), lst_itr); + } + } + ice_rm_all_sw_replay_rule_info(hw); + devm_kfree(ice_hw_to_dev(hw), sw->recp_list); + devm_kfree(ice_hw_to_dev(hw), sw); +} - mutex_destroy(&sw->mac_list_lock); - mutex_destroy(&sw->vlan_list_lock); - mutex_destroy(&sw->eth_m_list_lock); - mutex_destroy(&sw->promisc_list_lock); - mutex_destroy(&sw->mac_vlan_list_lock); +#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \ + (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry))) +#define ICE_FW_LOG_DESC_SIZE_MAX \ + ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX) - devm_kfree(ice_hw_to_dev(hw), sw); +/** + * ice_cfg_fw_log - configure FW logging + * @hw: pointer to the hw struct + * @enable: enable certain FW logging events if true, disable all if false + * + * This function enables/disables the FW logging via Rx CQ events and a UART + * port based on predetermined configurations. FW logging via the Rx CQ can be + * enabled/disabled for individual PF's. However, FW logging via the UART can + * only be enabled/disabled for all PFs on the same device. + * + * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in + * hw->fw_log need to be set accordingly, e.g. based on user-provided input, + * before initializing the device. + * + * When re/configuring FW logging, callers need to update the "cfg" elements of + * the hw->fw_log.evnts array with the desired logging event configurations for + * modules of interest. When disabling FW logging completely, the callers can + * just pass false in the "enable" parameter. On completion, the function will + * update the "cur" element of the hw->fw_log.evnts array with the resulting + * logging event configurations of the modules that are being re/configured. FW + * logging modules that are not part of a reconfiguration operation retain their + * previous states. 
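The ice_cfg_fw_log() kernel-doc above spells out the contract: hw->fw_log.cq_en / uart_en pick the transport, and each hw->fw_log.evnts[] entry carries the requested event mask in "cfg" and the last applied mask in "cur". A minimal caller-side sketch, assuming the fields are populated before ice_init_hw() runs (where that setup would live is not shown in this patch):

	/* Sketch only: request Rx-CQ FW logging for the LINK and ADMINQ
	 * modules before ice_init_hw() calls ice_cfg_fw_log(hw, true).
	 * The ">> ICE_AQC_FW_LOG_EN_S" mirrors the shift the function
	 * applies when it builds each buffer entry.
	 */
	hw->fw_log.cq_en = true;
	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_LINK].cfg =
		(ICE_AQC_FW_LOG_INFO_EN | ICE_AQC_FW_LOG_ERR_EN) >>
		ICE_AQC_FW_LOG_EN_S;
	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_ADMINQ].cfg =
		ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S;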
+ * + * Before resetting the device, it is recommended that the driver disables FW + * logging before shutting down the control queue. When disabling FW logging + * ("enable" = false), the latest configurations of FW logging events stored in + * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after + * a device reset. + * + * When enabling FW logging to emit log messages via the Rx CQ during the + * device's initialization phase, a mechanism alternative to interrupt handlers + * needs to be used to extract FW log messages from the Rx CQ periodically and + * to prevent the Rx CQ from being full and stalling other types of control + * messages from FW to SW. Interrupts are typically disabled during the device's + * initialization phase. + */ +static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) +{ + struct ice_aqc_fw_logging_data *data = NULL; + struct ice_aqc_fw_logging *cmd; + enum ice_status status = 0; + u16 i, chgs = 0, len = 0; + struct ice_aq_desc desc; + u8 actv_evnts = 0; + void *buf = NULL; + + if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) + return 0; + + /* Disable FW logging only when the control queue is still responsive */ + if (!enable && + (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) + return 0; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); + cmd = &desc.params.fw_logging; + + /* Indicate which controls are valid */ + if (hw->fw_log.cq_en) + cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID; + + if (hw->fw_log.uart_en) + cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID; + + if (enable) { + /* Fill in an array of entries with FW logging modules and + * logging events being reconfigured. + */ + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { + u16 val; + + /* Keep track of enabled event types */ + actv_evnts |= hw->fw_log.evnts[i].cfg; + + if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur) + continue; + + if (!data) { + data = devm_kzalloc(ice_hw_to_dev(hw), + ICE_FW_LOG_DESC_SIZE_MAX, + GFP_KERNEL); + if (!data) + return ICE_ERR_NO_MEMORY; + } + + val = i << ICE_AQC_FW_LOG_ID_S; + val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; + data->entry[chgs++] = cpu_to_le16(val); + } + + /* Only enable FW logging if at least one module is specified. + * If FW logging is currently enabled but all modules are not + * enabled to emit log messages, disable FW logging altogether. + */ + if (actv_evnts) { + /* Leave if there is effectively no change */ + if (!chgs) + goto out; + + if (hw->fw_log.cq_en) + cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; + + if (hw->fw_log.uart_en) + cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; + + buf = data; + len = ICE_FW_LOG_DESC_SIZE(chgs); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } + } + + status = ice_aq_send_cmd(hw, &desc, buf, len, NULL); + if (!status) { + /* Update the current configuration to reflect events enabled. + * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW + * logging mode is enabled for the device. They do not reflect + * actual modules being enabled to emit log messages. So, their + * values remain unchanged even when all modules are disabled. + */ + u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX; + + hw->fw_log.actv_evnts = actv_evnts; + for (i = 0; i < cnt; i++) { + u16 v, m; + + if (!enable) { + /* When disabling all FW logging events as part + * of device's de-initialization, the original + * configurations are retained, and can be used + * to reconfigure FW logging later if the device + * is re-initialized. 
+ */ + hw->fw_log.evnts[i].cur = 0; + continue; + } + + v = le16_to_cpu(data->entry[i]); + m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; + hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; + } + } + +out: + if (data) + devm_kfree(ice_hw_to_dev(hw), data); + + return status; +} + +/** + * ice_output_fw_log + * @hw: pointer to the hw struct + * @desc: pointer to the AQ message descriptor + * @buf: pointer to the buffer accompanying the AQ message + * + * Formats a FW Log message and outputs it via the standard driver logs. + */ +void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) +{ + ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n"); + ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf, + le16_to_cpu(desc->datalen)); + ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n"); +} + +/** + * ice_get_itr_intrl_gran - determine int/intrl granularity + * @hw: pointer to the hw struct + * + * Determines the itr/intrl granularities based on the maximum aggregate + * bandwidth according to the device's configuration during power-on. + */ +static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw) +{ + u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & + GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> + GL_PWR_MODE_CTL_CAR_MAX_BW_S; + + switch (max_agg_bw) { + case ICE_MAX_AGG_BW_200G: + case ICE_MAX_AGG_BW_100G: + case ICE_MAX_AGG_BW_50G: + hw->itr_gran = ICE_ITR_GRAN_ABOVE_25; + hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25; + break; + case ICE_MAX_AGG_BW_25G: + hw->itr_gran = ICE_ITR_GRAN_MAX_25; + hw->intrl_gran = ICE_INTRL_GRAN_MAX_25; + break; + default: + ice_debug(hw, ICE_DBG_INIT, + "Failed to determine itr/intrl granularity\n"); + return ICE_ERR_CFG; + } + + return 0; } /** @@ -400,16 +654,19 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) return status; - /* set these values to minimum allowed */ - hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200; - hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100; - hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50; - hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25; + status = ice_get_itr_intrl_gran(hw); + if (status) + return status; status = ice_init_all_ctrlq(hw); if (status) goto err_unroll_cqinit; + /* Enable FW logging. Not fatal if this fails. 
*/ + status = ice_cfg_fw_log(hw, true); + if (status) + ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n"); + status = ice_clear_pf_cfg(hw); if (status) goto err_unroll_cqinit; @@ -472,6 +729,13 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; + /* need a valid SW entry point to build a Tx tree */ + if (!hw->sw_entry_point_layer) { + ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); + status = ICE_ERR_CFG; + goto err_unroll_sched; + } + status = ice_init_fltr_mgmt_struct(hw); if (status) goto err_unroll_sched; @@ -494,7 +758,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; - ice_init_flex_parser(hw); + ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC); + ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2); return 0; @@ -515,15 +780,18 @@ err_unroll_cqinit: */ void ice_deinit_hw(struct ice_hw *hw) { + ice_cleanup_fltr_mgmt_struct(hw); + ice_sched_cleanup_all(hw); - ice_shutdown_all_ctrlq(hw); if (hw->port_info) { devm_kfree(ice_hw_to_dev(hw), hw->port_info); hw->port_info = NULL; } - ice_cleanup_fltr_mgmt_struct(hw); + /* Attempt to disable FW logging before shutting down control queues */ + ice_cfg_fw_log(hw, false); + ice_shutdown_all_ctrlq(hw); } /** @@ -652,6 +920,8 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); val = GLGEN_RTRIG_GLOBR_M; break; + default: + return ICE_ERR_PARAM; } val |= rd32(hw, GLGEN_RTRIG); @@ -904,7 +1174,22 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * @timeout: the maximum time in ms that the driver may hold the resource * @cd: pointer to command details structure or NULL * - * requests common resource using the admin queue commands (0x0008) + * Requests common resource using the admin queue commands (0x0008). + * When attempting to acquire the Global Config Lock, the driver can + * learn of three states: + * 1) ICE_SUCCESS - acquired lock, and can perform download package + * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load + * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has + * successfully downloaded the package; the driver does + * not have to download the package and can continue + * loading + * + * Note that if the caller is in an acquire lock, perform action, release lock + * phase of operation, it is possible that the FW may detect a timeout and issue + * a CORER. In this case, the driver will receive a CORER interrupt and will + * have to determine its cause. The calling thread that is handling this flow + * will likely get an error propagated back to it indicating the Download + * Package, Update Package or the Release Resource AQ commands timed out. */ static enum ice_status ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, @@ -922,13 +1207,43 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, cmd_resp->res_id = cpu_to_le16(res); cmd_resp->access_type = cpu_to_le16(access); cmd_resp->res_number = cpu_to_le32(sdp_number); + cmd_resp->timeout = cpu_to_le32(*timeout); + *timeout = 0; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. - * If the resource is held by someone else, the command completes with - * busy return value and the timeout field indicates the maximum time - * the current owner of the resource has to free it. 
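The new kernel-doc for ice_aq_req_res() above distinguishes three Global Config Lock outcomes: lock acquired, lock busy, and work already completed by another driver. A hedged caller sketch of ice_acquire_res() for that lock; ICE_RES_WRITE and the 3000 ms timeout are assumptions, the other identifiers appear in this patch:

	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
				 ICE_RES_WRITE, 3000);
	if (status == ICE_ERR_AQ_NO_WORK) {
		/* another PF already did the download; nothing left to do */
	} else if (!status) {
		/* lock held: perform the download/update, then release it */
		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
	} else {
		/* lock busy or AQ error: fail the operation */
	}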
+ */ + + /* Global config lock response utilizes an additional status field. + * + * If the Global config lock resource is held by some other driver, the + * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field + * and the timeout field indicates the maximum time the current owner + * of the resource has to free it. + */ + if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { + if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { + *timeout = le32_to_cpu(cmd_resp->timeout); + return 0; + } else if (le16_to_cpu(cmd_resp->status) == + ICE_AQ_RES_GLBL_IN_PROG) { + *timeout = le32_to_cpu(cmd_resp->timeout); + return ICE_ERR_AQ_ERROR; + } else if (le16_to_cpu(cmd_resp->status) == + ICE_AQ_RES_GLBL_DONE) { + return ICE_ERR_AQ_NO_WORK; + } + + /* invalid FW response, force a timeout immediately */ + *timeout = 0; + return ICE_ERR_AQ_ERROR; + } + + /* If the resource is held by some other driver, the command completes + * with a busy return value and the timeout field indicates the maximum + * time the current owner of the resource has to free it. */ if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) *timeout = le32_to_cpu(cmd_resp->timeout); @@ -967,30 +1282,28 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, * @hw: pointer to the HW structure * @res: resource id * @access: access type (read or write) + * @timeout: timeout in milliseconds * * This function will attempt to acquire the ownership of a resource. */ enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, - enum ice_aq_res_access_type access) + enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; + u32 time_left = timeout; enum ice_status status; - u32 time_left = 0; - u32 timeout; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - /* An admin queue return code of ICE_AQ_RC_EEXIST means that another - * driver has previously acquired the resource and performed any - * necessary updates; in this case the caller does not obtain the - * resource and has no further work to do. + /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has + * previously acquired the resource and performed any necessary updates; + * in this case the caller does not obtain the resource and has no + * further work to do. */ - if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { - status = ICE_ERR_AQ_NO_WORK; + if (status == ICE_ERR_AQ_NO_WORK) goto ice_acquire_res_exit; - } if (status) ice_debug(hw, ICE_DBG_RES, @@ -1003,11 +1316,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, timeout = (timeout > delay) ? 
timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { + if (status == ICE_ERR_AQ_NO_WORK) /* lock free, but no work to do */ - status = ICE_ERR_AQ_NO_WORK; break; - } if (!status) /* lock acquired */ @@ -1095,6 +1406,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, u16 cap = le16_to_cpu(cap_resp->cap); switch (cap) { + case ICE_AQC_CAPS_SRIOV: + caps->sr_iov_1_1 = (number == 1); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1); + break; + case ICE_AQC_CAPS_VF: + if (dev_p) { + dev_p->num_vfs_exposed = number; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: VFs exposed = %d\n", + dev_p->num_vfs_exposed); + } else if (func_p) { + func_p->num_allocd_vfs = number; + func_p->vf_base_id = logical_id; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: VFs allocated = %d\n", + func_p->num_allocd_vfs); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: VF base_id = %d\n", + func_p->vf_base_id); + } + break; case ICE_AQC_CAPS_VSI: if (dev_p) { dev_p->num_vsi_allocd_to_host = number; @@ -1171,7 +1504,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, * @hw: pointer to the hw struct * @buf: a virtual buffer to hold the capabilities * @buf_size: Size of the virtual buffer - * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM + * @cap_count: cap count needed if AQ err==ENOMEM * @opc: capabilities type to discover - pass in the command opcode * @cd: pointer to command details structure or NULL * @@ -1179,7 +1512,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, * the firmware. */ static enum ice_status -ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size, +ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_list_caps *cmd; @@ -1197,59 +1530,77 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size, status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status) ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc); - *data_size = le16_to_cpu(desc.datalen); - + else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) + *cap_count = + DIV_ROUND_UP(le16_to_cpu(desc.datalen), + sizeof(struct ice_aqc_list_caps_elem)); return status; } /** - * ice_get_caps - get info about the HW + * ice_discover_caps - get info about the HW * @hw: pointer to the hardware structure + * @opc: capabilities type to discover - pass in the command opcode */ -enum ice_status ice_get_caps(struct ice_hw *hw) +static enum ice_status ice_discover_caps(struct ice_hw *hw, + enum ice_adminq_opc opc) { enum ice_status status; - u16 data_size = 0; + u32 cap_count; u16 cbuf_len; u8 retries; /* The driver doesn't know how many capabilities the device will return * so the buffer size required isn't known ahead of time. The driver * starts with cbuf_len and if this turns out to be insufficient, the - * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs. - * The driver then allocates the buffer of this size and retries the - * operation. So it follows that the retry count is 2. + * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs. + * The driver then allocates the buffer based on the count and retries + * the operation. So it follows that the retry count is 2. 
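To make the retry description above concrete (the 58-element figure is purely illustrative):

	/* First pass: cap_count = ICE_GET_CAP_BUF_COUNT (40), so the buffer
	 * holds 40 struct ice_aqc_list_caps_elem entries. If the device
	 * actually reports 58 capabilities, FW returns ICE_AQ_RC_ENOMEM and
	 * desc.datalen carries the required size; cap_count is recomputed as
	 * DIV_ROUND_UP(datalen, sizeof(struct ice_aqc_list_caps_elem)) = 58.
	 * The second and final attempt allocates 58 entries and succeeds.
	 */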
*/ #define ICE_GET_CAP_BUF_COUNT 40 #define ICE_GET_CAP_RETRY_COUNT 2 - cbuf_len = ICE_GET_CAP_BUF_COUNT * - sizeof(struct ice_aqc_list_caps_elem); - + cap_count = ICE_GET_CAP_BUF_COUNT; retries = ICE_GET_CAP_RETRY_COUNT; do { void *cbuf; + cbuf_len = (u16)(cap_count * + sizeof(struct ice_aqc_list_caps_elem)); cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL); if (!cbuf) return ICE_ERR_NO_MEMORY; - status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size, - ice_aqc_opc_list_func_caps, NULL); + status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count, + opc, NULL); devm_kfree(ice_hw_to_dev(hw), cbuf); if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM) break; /* If ENOMEM is returned, try again with bigger buffer */ - cbuf_len = data_size; } while (--retries); return status; } /** + * ice_get_caps - get info about the HW + * @hw: pointer to the hardware structure + */ +enum ice_status ice_get_caps(struct ice_hw *hw) +{ + enum ice_status status; + + status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps); + if (!status) + status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps); + + return status; +} + +/** * ice_aq_manage_mac_write - manage MAC address write command * @hw: pointer to the hw struct * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address @@ -1307,6 +1658,110 @@ void ice_clear_pxe_mode(struct ice_hw *hw) } /** + * ice_get_link_speed_based_on_phy_type - returns link speed + * @phy_type_low: lower part of phy_type + * + * This helper function will convert a phy_type_low to its corresponding link + * speed. + * Note: In the structure of phy_type_low, there should be one bit set, as + * this function will convert one phy type to its speed. + * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned + * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned + */ +static u16 +ice_get_link_speed_based_on_phy_type(u64 phy_type_low) +{ + u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; + + switch (phy_type_low) { + case ICE_PHY_TYPE_LOW_100BASE_TX: + case ICE_PHY_TYPE_LOW_100M_SGMII: + speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; + break; + case ICE_PHY_TYPE_LOW_1000BASE_T: + case ICE_PHY_TYPE_LOW_1000BASE_SX: + case ICE_PHY_TYPE_LOW_1000BASE_LX: + case ICE_PHY_TYPE_LOW_1000BASE_KX: + case ICE_PHY_TYPE_LOW_1G_SGMII: + speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; + break; + case ICE_PHY_TYPE_LOW_2500BASE_T: + case ICE_PHY_TYPE_LOW_2500BASE_X: + case ICE_PHY_TYPE_LOW_2500BASE_KX: + speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; + break; + case ICE_PHY_TYPE_LOW_5GBASE_T: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; + break; + case ICE_PHY_TYPE_LOW_10GBASE_T: + case ICE_PHY_TYPE_LOW_10G_SFI_DA: + case ICE_PHY_TYPE_LOW_10GBASE_SR: + case ICE_PHY_TYPE_LOW_10GBASE_LR: + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; + break; + case ICE_PHY_TYPE_LOW_25GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: + speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; + break; + case ICE_PHY_TYPE_LOW_40GBASE_CR4: + case ICE_PHY_TYPE_LOW_40GBASE_SR4: + case 
ICE_PHY_TYPE_LOW_40GBASE_LR4: + case ICE_PHY_TYPE_LOW_40GBASE_KR4: + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; + break; + default: + speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; + break; + } + + return speed_phy_type_low; +} + +/** + * ice_update_phy_type + * @phy_type_low: pointer to the lower part of phy_type + * @link_speeds_bitmap: targeted link speeds bitmap + * + * Note: For the link_speeds_bitmap structure, you can check it at + * [ice_aqc_get_link_status->link_speed]. Caller can pass in + * link_speeds_bitmap include multiple speeds. + * + * The value of phy_type_low will present a certain link speed. This helper + * function will turn on bits in the phy_type_low based on the value of + * link_speeds_bitmap input parameter. + */ +void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap) +{ + u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN; + u64 pt_low; + int index; + + /* We first check with low part of phy_type */ + for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { + pt_low = BIT_ULL(index); + speed = ice_get_link_speed_based_on_phy_type(pt_low); + + if (link_speeds_bitmap & speed) + *phy_type_low |= BIT_ULL(index); + } +} + +/** * ice_aq_set_phy_cfg * @hw: pointer to the hw struct * @lport: logical port number @@ -1318,19 +1773,18 @@ void ice_clear_pxe_mode(struct ice_hw *hw) * mode as the PF may not have the privilege to set some of the PHY Config * parameters. This status will be indicated by the command response (0x0601). */ -static enum ice_status +enum ice_status ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) { - struct ice_aqc_set_phy_cfg *cmd; struct ice_aq_desc desc; if (!cfg) return ICE_ERR_PARAM; - cmd = &desc.params.set_phy; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); - cmd->lport_num = lport; + desc.params.set_phy.lport_num = lport; + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); } @@ -1339,8 +1793,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, * ice_update_link_info - update status of the HW network link * @pi: port info structure of the interested logical port */ -static enum ice_status -ice_update_link_info(struct ice_port_info *pi) +enum ice_status ice_update_link_info(struct ice_port_info *pi) { struct ice_aqc_get_phy_caps_data *pcaps; struct ice_phy_info *phy_info; @@ -1379,12 +1832,12 @@ out: * ice_set_fc * @pi: port information structure * @aq_failures: pointer to status code, specific to ice_set_fc routine - * @atomic_restart: enable automatic link update + * @ena_auto_link_update: enable automatic link update * * Set the requested flow control mode. 
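ice_update_phy_type() above turns a link-speed bitmap into phy_type_low bits by probing each bit with ice_get_link_speed_based_on_phy_type(). A minimal usage sketch; only the helper and the two speed macros come from this patch, the surrounding caller is assumed:

	u64 phy_type_low = 0;
	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;

	ice_update_phy_type(&phy_type_low, speeds);
	/* phy_type_low now has every ICE_PHY_TYPE_LOW_* bit whose speed is
	 * 10G or 25G set, ready to be folded into a set_phy_cfg request.
	 */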
*/ enum ice_status -ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart) +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_aqc_get_phy_caps_data *pcaps; @@ -1434,8 +1887,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart) int retry_count, retry_max = 10; /* Auto restart link so settings take effect */ - if (atomic_restart) - cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK; + if (ena_auto_link_update) + cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; /* Copy over all the old settings */ cfg.phy_type_low = pcaps->phy_type_low; cfg.low_power_ctrl = pcaps->low_power_ctrl; @@ -1654,7 +2107,7 @@ ice_aq_get_set_rss_lut_exit: /** * ice_aq_get_rss_lut * @hw: pointer to the hardware structure - * @vsi_id: VSI FW index + * @vsi_handle: software VSI handle * @lut_type: LUT table type * @lut: pointer to the LUT buffer provided by the caller * @lut_size: size of the LUT buffer @@ -1662,17 +2115,20 @@ ice_aq_get_set_rss_lut_exit: * get the RSS lookup table, PF or VSI type */ enum ice_status -ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, - u16 lut_size) +ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, + u8 *lut, u16 lut_size) { - return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0, - false); + if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), + lut_type, lut, lut_size, 0, false); } /** * ice_aq_set_rss_lut * @hw: pointer to the hardware structure - * @vsi_id: VSI FW index + * @vsi_handle: software VSI handle * @lut_type: LUT table type * @lut: pointer to the LUT buffer provided by the caller * @lut_size: size of the LUT buffer @@ -1680,11 +2136,14 @@ ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, * set the RSS lookup table, PF or VSI type */ enum ice_status -ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, - u16 lut_size) +ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, + u8 *lut, u16 lut_size) { - return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0, - true); + if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), + lut_type, lut, lut_size, 0, true); } /** @@ -1725,31 +2184,39 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, /** * ice_aq_get_rss_key * @hw: pointer to the hw struct - * @vsi_id: VSI FW index + * @vsi_handle: software VSI handle * @key: pointer to key info struct * * get the RSS key per VSI */ enum ice_status -ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id, +ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *key) { - return __ice_aq_get_set_rss_key(hw, vsi_id, key, false); + if (!ice_is_vsi_valid(hw, vsi_handle) || !key) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), + key, false); } /** * ice_aq_set_rss_key * @hw: pointer to the hw struct - * @vsi_id: VSI FW index + * @vsi_handle: software VSI handle * @keys: pointer to key info struct * * set the RSS key per VSI */ enum ice_status -ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id, +ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys) { - return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true); + if (!ice_is_vsi_valid(hw, vsi_handle) 
|| !keys) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), + keys, true); } /** @@ -1820,6 +2287,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, * @num_qgrps: number of groups in the list * @qg_list: the list of groups to disable * @buf_size: the total size of the qg_list buffer in bytes + * @rst_src: if called due to reset, specifies the RST source + * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * Disable LAN Tx queue (0x0C31) @@ -1827,6 +2296,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, static enum ice_status ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, + enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { struct ice_aqc_dis_txqs *cmd; @@ -1836,14 +2306,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, cmd = &desc.params.dis_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); - if (!qg_list) + /* qg_list can be NULL only in VM/VF reset flow */ + if (!qg_list && !rst_src) return ICE_ERR_PARAM; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) return ICE_ERR_PARAM; - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + cmd->num_entries = num_qgrps; + cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & + ICE_AQC_Q_DIS_TIMEOUT_M); + + switch (rst_src) { + case ICE_VM_RESET: + cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; + cmd->vmvf_and_timeout |= + cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); + break; + case ICE_VF_RESET: + cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; + /* In this case, FW expects vmvf_num to be absolute VF id */ + cmd->vmvf_and_timeout |= + cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & + ICE_AQC_Q_DIS_VMVF_NUM_M); + break; + case ICE_NO_RESET: + default: + break; + } + + /* If no queue group info, we are in a reset flow. 
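The rst_src/vmvf_num arguments threaded through ice_aq_dis_lan_txq() exist so the disable-queues command can still be issued when a VF reset left no queue list to pass. A sketch of that caller path; vsi->port_info and vf->vf_id are assumed names, and the NULL/0 arguments follow the "!num_queues && rst_src" shortcut added below:

	/* Sketch: finish a VF reset even though the queues are already
	 * disabled; FW still expects the disable-queues AQ command.
	 */
	ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET,
			vf->vf_id, NULL);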
Issue the AQ */ + if (!qg_list) + goto do_aq; + + /* set RD bit to indicate that command buffer is provided by the driver + * and it needs to be read by the firmware + */ + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + for (i = 0; i < num_qgrps; ++i) { /* Calculate the size taken up by the queue IDs in this group */ sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); @@ -1859,6 +2360,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, if (buf_size != sz) return ICE_ERR_PARAM; +do_aq: return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); } @@ -2088,7 +2590,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) /** * ice_ena_vsi_txq * @pi: port information structure - * @vsi_id: VSI id + * @vsi_handle: software VSI handle * @tc: tc number * @num_qgrps: Number of added queue groups * @buf: list of queue groups to be added @@ -2098,7 +2600,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * This function adds one lan q */ enum ice_status -ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps, +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd) { @@ -2115,15 +2617,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps, hw = pi->hw; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + mutex_lock(&pi->sched_lock); /* find a parent node */ - parent = ice_sched_get_free_qparent(pi, vsi_id, tc, + parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_LAN); if (!parent) { status = ICE_ERR_PARAM; goto ena_txq_exit; } + buf->parent_teid = parent->info.node_teid; node.parent_teid = parent->info.node_teid; /* Mark that the values in the "generic" section as valid. 
The default @@ -2161,13 +2667,16 @@ ena_txq_exit: * @num_queues: number of queues * @q_ids: pointer to the q_id array * @q_teids: pointer to queue node teids + * @rst_src: if called due to reset, specifies the RST source + * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * This function removes queues and their corresponding nodes in SW DB */ enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, - u32 *q_teids, struct ice_sq_cd *cd) + u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cd) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item qg_list; @@ -2176,6 +2685,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; + /* if queue is disabled already yet the disable queue command has to be + * sent to complete the VF reset, then call ice_aq_dis_lan_txq without + * any queue information + */ + + if (!num_queues && rst_src) + return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num, + NULL); + mutex_lock(&pi->sched_lock); for (i = 0; i < num_queues; i++) { @@ -2188,7 +2706,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, qg_list.num_qs = 1; qg_list.q_id[0] = cpu_to_le16(q_ids[i]); status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, - sizeof(qg_list), cd); + sizeof(qg_list), rst_src, vmvf_num, + cd); if (status) break; @@ -2201,7 +2720,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, /** * ice_cfg_vsi_qs - configure the new/exisiting VSI queues * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @maxqs: max queues array per TC * @owner: lan or rdma @@ -2209,7 +2728,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, * This function adds/updates the VSI queues per TC. */ static enum ice_status -ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, +ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *maxqs, u8 owner) { enum ice_status status = 0; @@ -2218,6 +2737,9 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + mutex_lock(&pi->sched_lock); for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { @@ -2225,7 +2747,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, if (!ice_sched_get_tc_node(pi, i)) continue; - status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner, + status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, ice_is_tc_ena(tc_bitmap, i)); if (status) break; @@ -2238,16 +2760,140 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, /** * ice_cfg_vsi_lan - configure VSI lan queues * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @max_lanqs: max lan queues array per TC * * This function adds/updates the VSI lan queues per TC. 
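ice_cfg_vsi_lan(), documented just above, now takes the software VSI handle plus a per-TC maximum-queue array. A minimal single-TC sketch; vsi->num_txq and vsi->port_info are assumed field names and using vsi->idx as the handle is an assumption, while ICE_MAX_TRAFFIC_CLASS and ICE_DFLT_TRAFFIC_CLASS come from this patch:

	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	enum ice_status status;

	max_txqs[0] = vsi->num_txq;	/* all LAN Tx queues on TC 0 */
	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
				 ICE_DFLT_TRAFFIC_CLASS, max_txqs);
	/* a non-zero status would mean the scheduler rejected the config */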
*/ enum ice_status -ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, +ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs) { - return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs, + return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, ICE_SCHED_NODE_OWNER_LAN); } + +/** + * ice_replay_pre_init - replay pre initialization + * @hw: pointer to the hw struct + * + * Initializes required config data for VSI, FD, ACL, and RSS before replay. + */ +static enum ice_status ice_replay_pre_init(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + u8 i; + + /* Delete old entries from replay filter list head if there is any */ + ice_rm_all_sw_replay_rule_info(hw); + /* In start of replay, move entries into replay_rules list, it + * will allow adding rules entries back to filt_rules list, + * which is operational list. + */ + for (i = 0; i < ICE_SW_LKUP_LAST; i++) + list_replace_init(&sw->recp_list[i].filt_rules, + &sw->recp_list[i].filt_replay_rules); + + return 0; +} + +/** + * ice_replay_vsi - replay VSI configuration + * @hw: pointer to the hw struct + * @vsi_handle: driver VSI handle + * + * Restore all VSI configuration after reset. It is required to call this + * function with main VSI first. + */ +enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) +{ + enum ice_status status; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + /* Replay pre-initialization if there is any */ + if (vsi_handle == ICE_MAIN_VSI_HANDLE) { + status = ice_replay_pre_init(hw); + if (status) + return status; + } + + /* Replay per VSI all filters */ + status = ice_replay_vsi_all_fltr(hw, vsi_handle); + return status; +} + +/** + * ice_replay_post - post replay configuration cleanup + * @hw: pointer to the hw struct + * + * Post replay cleanup. + */ +void ice_replay_post(struct ice_hw *hw) +{ + /* Delete old entries from replay filter list head */ + ice_rm_all_sw_replay_rule_info(hw); +} + +/** + * ice_stat_update40 - read 40 bit stat from the chip and update stat values + * @hw: ptr to the hardware info + * @hireg: high 32 bit HW register to read from + * @loreg: low 32 bit HW register to read from + * @prev_stat_loaded: bool to specify if previous stats are loaded + * @prev_stat: ptr to previous loaded stat value + * @cur_stat: ptr to current stat value + */ +void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, + bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) +{ + u64 new_data; + + new_data = rd32(hw, loreg); + new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; + + /* device stats are not reset at PFR, they likely will not be zeroed + * when the driver starts. So save the first values read and use them as + * offsets to be subtracted from the raw values in order to report stats + * that count from zero. 
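The roll-over handling described above is easiest to see with numbers (chosen for illustration):

	/* Example: *prev_stat = 0xFFFFFFFFF0 and the new 40-bit read is 0x10.
	 * Since new_data < *prev_stat, the wrap branch computes
	 *   *cur_stat = (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20,
	 * i.e. a delta of 32 across the wrap, and the final mask with
	 * 0xFFFFFFFFFF keeps the result inside 40 bits.
	 */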
+ */ + if (!prev_stat_loaded) + *prev_stat = new_data; + if (new_data >= *prev_stat) + *cur_stat = new_data - *prev_stat; + else + /* to manage the potential roll-over */ + *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; + *cur_stat &= 0xFFFFFFFFFFULL; +} + +/** + * ice_stat_update32 - read 32 bit stat from the chip and update stat values + * @hw: ptr to the hardware info + * @reg: HW register to read from + * @prev_stat_loaded: bool to specify if previous stats are loaded + * @prev_stat: ptr to previous loaded stat value + * @cur_stat: ptr to current stat value + */ +void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) +{ + u32 new_data; + + new_data = rd32(hw, reg); + + /* device stats are not reset at PFR, they likely will not be zeroed + * when the driver starts. So save the first values read and use them as + * offsets to be subtracted from the raw values in order to report stats + * that count from zero. + */ + if (!prev_stat_loaded) + *prev_stat = new_data; + if (new_data >= *prev_stat) + *cur_stat = new_data - *prev_stat; + else + /* to manage the potential roll-over */ + *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 9a5519130af1..1900681289a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -7,6 +7,7 @@ #include "ice.h" #include "ice_type.h" #include "ice_switch.h" +#include <linux/avf/virtchnl.h> void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len); @@ -21,9 +22,10 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up); +enum ice_status ice_update_link_info(struct ice_port_info *pi); enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, - enum ice_aq_res_access_type access); + enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); enum ice_status ice_init_nvm(struct ice_hw *hw); enum ice_status @@ -37,17 +39,18 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index); enum ice_status -ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, +ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut, u16 lut_size); enum ice_status -ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, +ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut, u16 lut_size); enum ice_status -ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id, +ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); enum ice_status -ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id, +ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); + bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); @@ -58,12 +61,24 @@ enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); + +enum ice_status +ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, + struct 
ice_aqc_get_phy_caps_data *caps, + struct ice_sq_cd *cd); +void +ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap); enum ice_status ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); enum ice_status -ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart); +ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, + struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); +enum ice_status +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, + bool ena_auto_link_update); + enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); @@ -75,12 +90,20 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, - u32 *q_teids, struct ice_sq_cd *cmd_details); + u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cmd_details); enum ice_status -ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap, +ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs); enum ice_status -ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps, +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); +enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); +void ice_replay_post(struct ice_hw *hw); +void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); +void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, + bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 62be72fdc8f3..84c967294eaf 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -33,6 +33,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw) } /** + * ice_mailbox_init_regs - Initialize Mailbox registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_sq and alloc_rq functions have already been called + */ +static void ice_mailbox_init_regs(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->mailboxq; + + /* set head and tail registers in our local struct */ + cq->sq.head = PF_MBX_ATQH; + cq->sq.tail = PF_MBX_ATQT; + cq->sq.len = PF_MBX_ATQLEN; + cq->sq.bah = PF_MBX_ATQBAH; + cq->sq.bal = PF_MBX_ATQBAL; + cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M; + cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M; + cq->sq.head_mask = PF_MBX_ATQH_ATQH_M; + + cq->rq.head = PF_MBX_ARQH; + cq->rq.tail = PF_MBX_ARQT; + cq->rq.len = PF_MBX_ARQLEN; + cq->rq.bah = PF_MBX_ARQBAH; + cq->rq.bal = PF_MBX_ARQBAL; + cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M; + cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M; + cq->rq.head_mask = PF_MBX_ARQH_ARQH_M; +} + +/** * ice_check_sq_alive * @hw: pointer to the hw struct * @cq: pointer to the specific Control queue @@ -518,22 +548,31 @@ shutdown_sq_out: /** * ice_aq_ver_check - Check the reported AQ API version. 
- * @fw_branch: The "branch" of FW, typically describes the device type - * @fw_major: The major version of the FW API - * @fw_minor: The minor version increment of the FW API + * @hw: pointer to the hardware structure * * Checks if the driver should load on a given AQ API version. * * Return: 'true' iff the driver should attempt to load. 'false' otherwise. */ -static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor) +static bool ice_aq_ver_check(struct ice_hw *hw) { - if (fw_branch != EXP_FW_API_VER_BRANCH) - return false; - if (fw_major != EXP_FW_API_VER_MAJOR) - return false; - if (fw_minor != EXP_FW_API_VER_MINOR) + if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { + /* Major API version is newer than expected, don't load */ + dev_warn(ice_hw_to_dev(hw), + "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); return false; + } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { + if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } else { + /* Major API version is older than expected, log a warning */ + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } return true; } @@ -588,8 +627,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) if (status) goto init_ctrlq_free_rq; - if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver, - hw->api_min_ver)) { + if (!ice_aq_ver_check(hw)) { status = ICE_ERR_FW_API_VER; goto init_ctrlq_free_rq; } @@ -597,11 +635,11 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) return 0; init_ctrlq_free_rq: - if (cq->rq.head) { + if (cq->rq.count) { ice_shutdown_rq(hw, cq); mutex_destroy(&cq->rq_lock); } - if (cq->sq.head) { + if (cq->sq.count) { ice_shutdown_sq(hw, cq); mutex_destroy(&cq->sq_lock); } @@ -631,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) ice_adminq_init_regs(hw); cq = &hw->adminq; break; + case ICE_CTL_Q_MAILBOX: + ice_mailbox_init_regs(hw); + cq = &hw->mailboxq; + break; default: return ICE_ERR_PARAM; } @@ -688,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) if (ret_code) return ret_code; - return ice_init_check_adminq(hw); + ret_code = ice_init_check_adminq(hw); + if (ret_code) + return ret_code; + + /* Init Mailbox queue */ + return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); } /** @@ -706,15 +753,18 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) if (ice_check_sq_alive(hw, cq)) ice_aq_q_shutdown(hw, true); break; + case ICE_CTL_Q_MAILBOX: + cq = &hw->mailboxq; + break; default: return; } - if (cq->sq.head) { + if (cq->sq.count) { ice_shutdown_sq(hw, cq); mutex_destroy(&cq->sq_lock); } - if (cq->rq.head) { + if (cq->rq.count) { ice_shutdown_rq(hw, cq); mutex_destroy(&cq->rq_lock); } @@ -728,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) { /* Shutdown FW admin queue */ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + /* Shutdown PF-VF Mailbox */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); } /** @@ -806,6 +858,9 @@ 
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, u16 retval = 0; u32 val = 0; + /* if reset is in progress return a soft error */ + if (hw->reset_ongoing) + return ICE_ERR_RESET_ONGOING; mutex_lock(&cq->sq_lock); cq->sq_last_status = ICE_AQ_RC_OK; @@ -847,7 +902,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); if (cd) - memcpy(details, cd, sizeof(*details)); + *details = *cd; else memset(details, 0, sizeof(*details)); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index ea02b89243e2..437f832fd7c4 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -8,6 +8,7 @@ /* Maximum buffer lengths for all control queue types */ #define ICE_AQ_MAX_BUF_LEN 4096 +#define ICE_MBXQ_MAX_BUF_LEN 4096 #define ICE_CTL_Q_DESC(R, i) \ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) @@ -28,6 +29,7 @@ enum ice_ctl_q { ICE_CTL_Q_UNKNOWN = 0, ICE_CTL_Q_ADMIN, + ICE_CTL_Q_MAILBOX, }; /* Control Queue default settings */ diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 0e14d7215a6e..a6f0a5c0c305 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -5,15 +5,11 @@ #define _ICE_DEVIDS_H_ /* Device IDs */ -/* Intel(R) Ethernet Controller C810 for backplane */ +/* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_C810_BACKPLANE 0x1591 -/* Intel(R) Ethernet Controller C810 for QSFP */ +/* Intel(R) Ethernet Controller E810-C for QSFP */ #define ICE_DEV_ID_C810_QSFP 0x1592 -/* Intel(R) Ethernet Controller C810 for SFP */ +/* Intel(R) Ethernet Controller E810-C for SFP */ #define ICE_DEV_ID_C810_SFP 0x1593 -/* Intel(R) Ethernet Controller C810/X557-AT 10GBASE-T */ -#define ICE_DEV_ID_C810_10G_BASE_T 0x1594 -/* Intel(R) Ethernet Controller C810 1GbE */ -#define ICE_DEV_ID_C810_SGMII 0x1595 #endif /* _ICE_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c71a9b528d6d..96923580f2a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -332,58 +332,473 @@ ice_get_ethtool_stats(struct net_device *netdev, } } -static int -ice_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *ks) +/** + * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes + * @netdev: network interface device structure + * @ks: ethtool link ksettings struct to fill out + */ +static void ice_phy_type_to_ethtool(struct net_device *netdev, + struct ethtool_link_ksettings *ks) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; - bool link_up; + u64 phy_types_low; hw_link_info = &vsi->port_info->phy.link_info; - link_up = hw_link_info->link_info & ICE_AQ_LINK_UP; + phy_types_low = vsi->port_info->phy.phy_type_low; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || + phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T 
|| + phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX || + phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X || + phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseX_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseX_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 5000baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 5000baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA || + phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || + phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR || + phy_types_low & 
ICE_PHY_TYPE_LOW_25GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseSR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || + phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); + } - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseT_Full); - ethtool_link_ksettings_add_link_mode(ks, advertising, - 10000baseT_Full); + /* Autoneg PHY types */ + if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || + phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX || + phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX || + phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR || + phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 || + phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || + phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + } +} - /* set speed and duplex */ - if (link_up) { - switch (hw_link_info->link_speed) { - case ICE_AQ_LINK_SPEED_100MB: - ks->base.speed = SPEED_100; - break; - case ICE_AQ_LINK_SPEED_2500MB: - ks->base.speed = SPEED_2500; - break; - case ICE_AQ_LINK_SPEED_5GB: - ks->base.speed = SPEED_5000; - break; - case ICE_AQ_LINK_SPEED_10GB: - ks->base.speed = SPEED_10000; - break; - case ICE_AQ_LINK_SPEED_25GB: - ks->base.speed = SPEED_25000; - break; - case ICE_AQ_LINK_SPEED_40GB: - ks->base.speed = SPEED_40000; - break; - default: - ks->base.speed = 
SPEED_UNKNOWN; - break; - } +#define TEST_SET_BITS_TIMEOUT 50 +#define TEST_SET_BITS_SLEEP_MAX 2000 +#define TEST_SET_BITS_SLEEP_MIN 1000 - ks->base.duplex = DUPLEX_FULL; - } else { - ks->base.speed = SPEED_UNKNOWN; - ks->base.duplex = DUPLEX_UNKNOWN; +/** + * ice_get_settings_link_up - Get Link settings for when link is up + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + */ +static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ethtool_link_ksettings cap_ksettings; + struct ice_link_status *link_info; + struct ice_vsi *vsi = np->vsi; + bool unrecog_phy_low = false; + + link_info = &vsi->port_info->phy.link_info; + + /* Initialize supported and advertised settings based on phy settings */ + switch (link_info->phy_type_low) { + case ICE_PHY_TYPE_LOW_100BASE_TX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + break; + case ICE_PHY_TYPE_LOW_100M_SGMII: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + break; + case ICE_PHY_TYPE_LOW_1000BASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_1G_SGMII: + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_1000BASE_SX: + case ICE_PHY_TYPE_LOW_1000BASE_LX: + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + break; + case ICE_PHY_TYPE_LOW_1000BASE_KX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + break; + case ICE_PHY_TYPE_LOW_2500BASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseT_Full); + break; + case ICE_PHY_TYPE_LOW_2500BASE_X: + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseX_Full); + break; + case ICE_PHY_TYPE_LOW_2500BASE_KX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseX_Full); + break; + case ICE_PHY_TYPE_LOW_5GBASE_T: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 5000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 5000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + 
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_10G_SFI_DA: + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_SR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + break; + case ICE_PHY_TYPE_LOW_25GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + break; + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + break; + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + break; + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + break; + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_KR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + break; + default: + unrecog_phy_low = true; + } + + if (unrecog_phy_low) { + /* if we got here and link is up something bad is afoot */ + netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n", + (u64)link_info->phy_type_low); } + /* Now that we've worked out everything that could be supported by the + * current PHY type, get what is supported by the NVM and intersect + * them to get what is truly supported + */ + memset(&cap_ksettings, 0, 
sizeof(struct ethtool_link_ksettings)); + ice_phy_type_to_ethtool(netdev, &cap_ksettings); + ethtool_intersect_link_masks(ks, &cap_ksettings); + + switch (link_info->link_speed) { + case ICE_AQ_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + break; + case ICE_AQ_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + break; + case ICE_AQ_LINK_SPEED_20GB: + ks->base.speed = SPEED_20000; + break; + case ICE_AQ_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + break; + case ICE_AQ_LINK_SPEED_5GB: + ks->base.speed = SPEED_5000; + break; + case ICE_AQ_LINK_SPEED_2500MB: + ks->base.speed = SPEED_2500; + break; + case ICE_AQ_LINK_SPEED_1000MB: + ks->base.speed = SPEED_1000; + break; + case ICE_AQ_LINK_SPEED_100MB: + ks->base.speed = SPEED_100; + break; + default: + netdev_info(netdev, + "WARNING: Unrecognized link_speed (0x%x).\n", + link_info->link_speed); + break; + } + ks->base.duplex = DUPLEX_FULL; +} + +/** + * ice_get_settings_link_down - Get the Link settings when link is down + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + */ +static void +ice_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device __always_unused *netdev) +{ + /* link is down and the driver needs to fall back on + * supported phy types to figure out what info to display + */ + ice_phy_type_to_ethtool(netdev, ks); + + /* With no link, speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +/** + * ice_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Reports speed/duplex settings based on media_type + */ +static int ice_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_link_status *hw_link_info; + struct ice_vsi *vsi = np->vsi; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + hw_link_info = &vsi->port_info->phy.link_info; + + /* set speed and duplex */ + if (hw_link_info->link_info & ICE_AQ_LINK_UP) + ice_get_settings_link_up(ks, netdev); + else + ice_get_settings_link_down(ks, netdev); + /* set autoneg settings */ - ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; /* set media type settings */ switch (vsi->port_info->phy.media_type) { @@ -442,6 +857,311 @@ ice_get_link_ksettings(struct net_device *netdev, } /** + * ice_ksettings_find_adv_link_speed - Find advertising link speed + * @ks: ethtool ksettings + */ +static u16 +ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) +{ + u16 adv_link_speed = 0; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_100MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 2500baseT_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 2500baseX_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 5000baseT_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_5GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseLR_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseCR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseKR_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_25GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseCR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseSR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseLR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseKR4_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_40GB; + + return adv_link_speed; +} + +/** + * ice_setup_autoneg + * @p: port info + * @ks: ethtool_link_ksettings + * @config: configuration that will be sent down to FW + * @autoneg_enabled: autonegotiation is enabled or not + * @autoneg_changed: will there a change in autonegotiation + * @netdev: network interface device structure + * + * Setup PHY autonegotiation feature + */ +static int +ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, + struct ice_aqc_set_phy_cfg_data *config, + u8 autoneg_enabled, u8 *autoneg_changed, + struct net_device *netdev) +{ + int err = 0; + + *autoneg_changed = 0; + + /* Check autoneg */ + if (autoneg_enabled == AUTONEG_ENABLE) { + /* If autoneg was not already enabled */ + if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) { + /* If autoneg is not supported, return error */ + if (!ethtool_link_ksettings_test_link_mode(ks, + supported, + Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy.\n"); + err = -EINVAL; + } else { + /* Autoneg is allowed to change */ + config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + *autoneg_changed = 1; + } + } + } else { + /* If autoneg is currently enabled */ + if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) { + /* If 
autoneg is supported 10GBASE_T is the only phy + * that can disable it, so otherwise return error + */ + if (ethtool_link_ksettings_test_link_mode(ks, + supported, + Autoneg)) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + } else { + /* Autoneg is allowed to change */ + config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + *autoneg_changed = 1; + } + } + } + + return err; +} + +/** + * ice_set_link_ksettings - Set Speed and Duplex + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Set speed/duplex per media_types advertised/forced + */ +static int ice_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0; + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ethtool_link_ksettings safe_ks, copy_ks; + struct ice_aqc_get_phy_caps_data *abilities; + u16 adv_link_speed, curr_link_speed, idx; + struct ice_aqc_set_phy_cfg_data config; + struct ice_pf *pf = np->vsi->back; + struct ice_port_info *p; + u8 autoneg_changed = 0; + enum ice_status status; + u64 phy_type_low; + int err = 0; + bool linkup; + + p = np->vsi->port_info; + + if (!p) + return -EOPNOTSUPP; + + /* Check if this is lan vsi */ + for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) { + if (pf->vsi[idx]->type == ICE_VSI_PF) { + if (np->vsi != pf->vsi[idx]) + return -EOPNOTSUPP; + break; + } + } + + if (p->phy.media_type != ICE_MEDIA_BASET && + p->phy.media_type != ICE_MEDIA_FIBER && + p->phy.media_type != ICE_MEDIA_BACKPLANE && + p->phy.media_type != ICE_MEDIA_DA && + p->phy.link_info.link_info & ICE_AQ_LINK_UP) + return -EOPNOTSUPP; + + /* copy the ksettings to copy_ks to avoid modifying the original */ + memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings)); + + /* save autoneg out of ksettings */ + autoneg = copy_ks.base.autoneg; + + memset(&safe_ks, 0, sizeof(safe_ks)); + + /* Get link modes supported by hardware.*/ + ice_phy_type_to_ethtool(netdev, &safe_ks); + + /* and check against modes requested by user. + * Return an error if unsupported mode was set. + */ + if (!bitmap_subset(copy_ks.link_modes.advertising, + safe_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + /* get our own copy of the bits to check against */ + memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + safe_ks.base.cmd = copy_ks.base.cmd; + safe_ks.base.link_mode_masks_nwords = + copy_ks.base.link_mode_masks_nwords; + ice_get_link_ksettings(netdev, &safe_ks); + + /* set autoneg back to what it currently is */ + copy_ks.base.autoneg = safe_ks.base.autoneg; + /* we don't compare the speed */ + copy_ks.base.speed = safe_ks.base.speed; + + /* If copy_ks.base and safe_ks.base are not the same now, then they are + * trying to set something that we do not support. 
+ */ + if (memcmp(&copy_ks.base, &safe_ks.base, + sizeof(struct ethtool_link_settings))) + return -EOPNOTSUPP; + + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); + } + + abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities), + GFP_KERNEL); + if (!abilities) + return -ENOMEM; + + /* Get the current phy config */ + status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities, + NULL); + if (status) { + err = -EAGAIN; + goto done; + } + + /* Copy abilities to config in case autoneg is not set below */ + memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data)); + config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; + if (abilities->caps & ICE_AQC_PHY_AN_MODE) + config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + + /* Check autoneg */ + err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, + netdev); + + if (err) + goto done; + + /* Call to get the current link speed */ + p->phy.get_link_info = true; + status = ice_get_link_status(p, &linkup); + if (status) { + err = -EAGAIN; + goto done; + } + + curr_link_speed = p->phy.link_info.link_speed; + adv_link_speed = ice_ksettings_find_adv_link_speed(ks); + + /* If speed didn't get set, set it to what it currently is. + * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. + */ + if (!adv_link_speed) + adv_link_speed = curr_link_speed; + + /* Convert the advertise link speeds to their corresponded PHY_TYPE */ + ice_update_phy_type(&phy_type_low, adv_link_speed); + + if (!autoneg_changed && adv_link_speed == curr_link_speed) { + netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); + goto done; + } + + /* copy over the rest of the abilities */ + config.low_power_ctrl = abilities->low_power_ctrl; + config.eee_cap = abilities->eee_cap; + config.eeer_value = abilities->eeer_value; + config.link_fec_opt = abilities->link_fec_options; + + /* save the requested speeds */ + p->phy.link_info.req_speeds = adv_link_speed; + + /* set link and auto negotiation so changes take effect */ + config.caps |= ICE_AQ_PHY_ENA_LINK; + + if (phy_type_low) { + config.phy_type_low = cpu_to_le64(phy_type_low) & + abilities->phy_type_low; + } else { + err = -EAGAIN; + netdev_info(netdev, "Nothing changed. 
No PHY_TYPE is corresponded to advertised link speed.\n"); + goto done; + } + + /* If link is up put link down */ + if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) { + /* Tell the OS link is going down, the link will go + * back up when fw says it is ready asynchronously + */ + ice_print_link_msg(np->vsi, false); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + /* make the aq call */ + status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL); + if (status) { + netdev_info(netdev, "Set phy config failed,\n"); + err = -EAGAIN; + } + +done: + devm_kfree(&pf->pdev->dev, abilities); + clear_bit(__ICE_CFG_BUSY, pf->state); + + return err; +} + +/** * ice_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -478,9 +1198,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) ring->tx_max_pending = ICE_MAX_NUM_DESC; ring->rx_pending = vsi->rx_rings[0]->count; ring->tx_pending = vsi->tx_rings[0]->count; - ring->rx_mini_pending = ICE_MIN_NUM_DESC; + + /* Rx mini and jumbo rings are not supported */ ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; + ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } @@ -498,14 +1220,23 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) ring->tx_pending < ICE_MIN_NUM_DESC || ring->rx_pending > ICE_MAX_NUM_DESC || ring->rx_pending < ICE_MIN_NUM_DESC) { - netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", ring->tx_pending, ring->rx_pending, - ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC); + ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC, + ICE_REQ_DESC_MULTIPLE); return -EINVAL; } new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE); + if (new_tx_cnt != ring->tx_pending) + netdev_info(netdev, + "Requested Tx descriptor count rounded up to %d\n", + new_tx_cnt); new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE); + if (new_rx_cnt != ring->rx_pending) + netdev_info(netdev, + "Requested Rx descriptor count rounded up to %d\n", + new_rx_cnt); /* if nothing to do return success */ if (new_tx_cnt == vsi->tx_rings[0]->count && @@ -933,6 +1664,7 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir, static const struct ethtool_ops ice_ethtool_ops = { .get_link_ksettings = ice_get_link_ksettings, + .set_link_ksettings = ice_set_link_ksettings, .get_drvinfo = ice_get_drvinfo, .get_regs_len = ice_get_regs_len, .get_regs = ice_get_regs, diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6076fc87df9d..a6679a9bfd3a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -6,251 +6,323 @@ #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ -#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) -#define PF_FW_ARQBAH 0x00080180 -#define PF_FW_ARQBAL 0x00080080 -#define PF_FW_ARQH 0x00080380 -#define PF_FW_ARQH_ARQH_S 0 -#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, PF_FW_ARQH_ARQH_S) -#define PF_FW_ARQLEN 0x00080280 -#define PF_FW_ARQLEN_ARQLEN_S 0 -#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S) -#define PF_FW_ARQLEN_ARQVFE_S 28 -#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S) -#define PF_FW_ARQLEN_ARQOVFL_S 29 -#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S) -#define PF_FW_ARQLEN_ARQCRIT_S 30 
-#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S) -#define PF_FW_ARQLEN_ARQENABLE_S 31 -#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S) -#define PF_FW_ARQT 0x00080480 -#define PF_FW_ATQBAH 0x00080100 -#define PF_FW_ATQBAL 0x00080000 -#define PF_FW_ATQH 0x00080300 -#define PF_FW_ATQH_ATQH_S 0 -#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, PF_FW_ATQH_ATQH_S) -#define PF_FW_ATQLEN 0x00080200 -#define PF_FW_ATQLEN_ATQLEN_S 0 -#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S) -#define PF_FW_ATQLEN_ATQVFE_S 28 -#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S) -#define PF_FW_ATQLEN_ATQOVFL_S 29 -#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S) -#define PF_FW_ATQLEN_ATQCRIT_S 30 -#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S) -#define PF_FW_ATQLEN_ATQENABLE_S 31 -#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S) -#define PF_FW_ATQT 0x00080400 - +#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) +#define PF_FW_ARQBAH 0x00080180 +#define PF_FW_ARQBAL 0x00080080 +#define PF_FW_ARQH 0x00080380 +#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, 0) +#define PF_FW_ARQLEN 0x00080280 +#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0) +#define PF_FW_ARQLEN_ARQVFE_M BIT(28) +#define PF_FW_ARQLEN_ARQOVFL_M BIT(29) +#define PF_FW_ARQLEN_ARQCRIT_M BIT(30) +#define PF_FW_ARQLEN_ARQENABLE_M BIT(31) +#define PF_FW_ARQT 0x00080480 +#define PF_FW_ATQBAH 0x00080100 +#define PF_FW_ATQBAL 0x00080000 +#define PF_FW_ATQH 0x00080300 +#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, 0) +#define PF_FW_ATQLEN 0x00080200 +#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) +#define PF_FW_ATQLEN_ATQVFE_M BIT(28) +#define PF_FW_ATQLEN_ATQOVFL_M BIT(29) +#define PF_FW_ATQLEN_ATQCRIT_M BIT(30) +#define PF_FW_ATQLEN_ATQENABLE_M BIT(31) +#define PF_FW_ATQT 0x00080400 +#define PF_MBX_ARQBAH 0x0022E400 +#define PF_MBX_ARQBAL 0x0022E380 +#define PF_MBX_ARQH 0x0022E500 +#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0) +#define PF_MBX_ARQLEN 0x0022E480 +#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0) +#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31) +#define PF_MBX_ARQT 0x0022E580 +#define PF_MBX_ATQBAH 0x0022E180 +#define PF_MBX_ATQBAL 0x0022E100 +#define PF_MBX_ATQH 0x0022E280 +#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0) +#define PF_MBX_ATQLEN 0x0022E200 +#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) +#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31) +#define PF_MBX_ATQT 0x0022E300 #define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24) #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0) #define 
GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S) - -#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) -#define QRXFLXP_CNTXT_RXDID_IDX_S 0 -#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S) -#define QRXFLXP_CNTXT_RXDID_PRIO_S 8 -#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S) -#define QRXFLXP_CNTXT_TS_S 11 -#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S) -#define GLGEN_RSTAT 0x000B8188 -#define GLGEN_RSTAT_DEVSTATE_S 0 -#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S) -#define GLGEN_RSTCTL 0x000B8180 -#define GLGEN_RSTCTL_GRSTDEL_S 0 -#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S) -#define GLGEN_RSTAT_RESET_TYPE_S 2 -#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S) -#define GLGEN_RTRIG 0x000B8190 -#define GLGEN_RTRIG_CORER_S 0 -#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S) -#define GLGEN_RTRIG_GLOBR_S 1 -#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S) -#define GLGEN_STAT 0x000B612C -#define PFGEN_CTRL 0x00091000 -#define PFGEN_CTRL_PFSWR_S 0 -#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S) -#define PFGEN_STATE 0x00088000 -#define PRTGEN_STATUS 0x000B8100 -#define PFHMC_ERRORDATA 0x00520500 -#define PFHMC_ERRORINFO 0x00520400 -#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) -#define GLINT_DYN_CTL_INTENA_S 0 -#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S) -#define GLINT_DYN_CTL_CLEARPBA_S 1 -#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S) -#define GLINT_DYN_CTL_SWINT_TRIG_S 2 -#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S) -#define GLINT_DYN_CTL_ITR_INDX_S 3 -#define GLINT_DYN_CTL_SW_ITR_INDX_S 25 -#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S) -#define GLINT_DYN_CTL_INTENA_MSK_S 31 -#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S) -#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) -#define PFINT_FW_CTL 0x0016C800 -#define PFINT_FW_CTL_MSIX_INDX_S 0 -#define PFINT_FW_CTL_MSIX_INDX_M 
ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S) -#define PFINT_FW_CTL_ITR_INDX_S 11 -#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S) -#define PFINT_FW_CTL_CAUSE_ENA_S 30 -#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) -#define PFINT_OICR 0x0016CA00 -#define PFINT_OICR_ECC_ERR_S 16 -#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) -#define PFINT_OICR_MAL_DETECT_S 19 -#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S) -#define PFINT_OICR_GRST_S 20 -#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) -#define PFINT_OICR_PCI_EXCEPTION_S 21 -#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) -#define PFINT_OICR_HMC_ERR_S 26 -#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) -#define PFINT_OICR_PE_CRITERR_S 28 -#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S) -#define PFINT_OICR_CTL 0x0016CA80 -#define PFINT_OICR_CTL_MSIX_INDX_S 0 -#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S) -#define PFINT_OICR_CTL_ITR_INDX_S 11 -#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S) -#define PFINT_OICR_CTL_CAUSE_ENA_S 30 -#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S) -#define PFINT_OICR_ENA 0x0016C900 -#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) -#define QINT_RQCTL_MSIX_INDX_S 0 -#define QINT_RQCTL_ITR_INDX_S 11 -#define QINT_RQCTL_CAUSE_ENA_S 30 -#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S) -#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) -#define QINT_TQCTL_MSIX_INDX_S 0 -#define QINT_TQCTL_ITR_INDX_S 11 -#define QINT_TQCTL_CAUSE_ENA_S 30 -#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S) -#define GLLAN_RCTL_0 0x002941F8 -#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) -#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) -#define QRX_CTRL_MAX_INDEX 2047 -#define QRX_CTRL_QENA_REQ_S 0 -#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S) -#define QRX_CTRL_QENA_STAT_S 2 -#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S) -#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4)) -#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) -#define GLNVM_FLA 0x000B6108 -#define GLNVM_FLA_LOCKED_S 6 -#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S) -#define GLNVM_GENS 0x000B6100 -#define GLNVM_GENS_SR_SIZE_S 5 -#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S) -#define GLNVM_ULD 0x000B6008 -#define GLNVM_ULD_CORER_DONE_S 3 -#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S) -#define GLNVM_ULD_GLOBR_DONE_S 4 -#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S) -#define PF_FUNC_RID 0x0009E880 -#define PF_FUNC_RID_FUNC_NUM_S 0 -#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S) -#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) -#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) -#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) -#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) -#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) -#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) -#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) -#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) -#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) -#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) -#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) -#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) -#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) -#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) -#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) -#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) 
* 8)) -#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) -#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) -#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) -#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) -#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) -#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) -#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) -#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) -#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) -#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) -#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) -#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) -#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) -#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) -#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) -#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) -#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) -#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) -#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) -#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) -#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) -#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) -#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) -#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) -#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) -#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) -#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) -#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) -#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) -#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) -#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) -#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) -#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) -#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) -#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) -#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) -#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) -#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) -#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) -#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) -#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) -#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) -#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) -#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) -#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) -#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) -#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) -#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) -#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) -#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) -#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) -#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) -#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) -#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) -#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) -#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) -#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) -#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) -#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) -#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) -#define VSIQF_HKEY_MAX_INDEX 12 +#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, 30) +#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) +#define QRXFLXP_CNTXT_RXDID_IDX_S 0 +#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0) +#define QRXFLXP_CNTXT_RXDID_PRIO_S 8 +#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8) +#define GLGEN_RSTAT 0x000B8188 +#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0) +#define GLGEN_RSTCTL 0x000B8180 +#define 
GLGEN_RSTCTL_GRSTDEL_S 0 +#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S) +#define GLGEN_RSTAT_RESET_TYPE_S 2 +#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, 2) +#define GLGEN_RTRIG 0x000B8190 +#define GLGEN_RTRIG_CORER_M BIT(0) +#define GLGEN_RTRIG_GLOBR_M BIT(1) +#define GLGEN_STAT 0x000B612C +#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4)) +#define PFGEN_CTRL 0x00091000 +#define PFGEN_CTRL_PFSWR_M BIT(0) +#define PFGEN_STATE 0x00088000 +#define PRTGEN_STATUS 0x000B8100 +#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4)) +#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4)) +#define VPGEN_VFRSTAT_VFRD_M BIT(0) +#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4)) +#define VPGEN_VFRTRIG_VFSWR_M BIT(0) +#define PFHMC_ERRORDATA 0x00520500 +#define PFHMC_ERRORINFO 0x00520400 +#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) +#define GLINT_DYN_CTL_INTENA_M BIT(0) +#define GLINT_DYN_CTL_CLEARPBA_M BIT(1) +#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) +#define GLINT_DYN_CTL_ITR_INDX_S 3 +#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) +#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) +#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) +#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) +#define GLINT_RATE_INTRL_ENA_M BIT(6) +#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4)) +#define GLINT_VECT2FUNC_VF_NUM_S 0 +#define GLINT_VECT2FUNC_VF_NUM_M ICE_M(0xFF, 0) +#define GLINT_VECT2FUNC_PF_NUM_S 12 +#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12) +#define GLINT_VECT2FUNC_IS_PF_S 16 +#define GLINT_VECT2FUNC_IS_PF_M BIT(16) +#define PFINT_FW_CTL 0x0016C800 +#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) +#define PFINT_FW_CTL_ITR_INDX_S 11 +#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11) +#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_MBX_CTL 0x0016B280 +#define PFINT_MBX_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) +#define PFINT_MBX_CTL_ITR_INDX_S 11 +#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11) +#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_OICR 0x0016CA00 +#define PFINT_OICR_ECC_ERR_M BIT(16) +#define PFINT_OICR_MAL_DETECT_M BIT(19) +#define PFINT_OICR_GRST_M BIT(20) +#define PFINT_OICR_PCI_EXCEPTION_M BIT(21) +#define PFINT_OICR_HMC_ERR_M BIT(26) +#define PFINT_OICR_PE_CRITERR_M BIT(28) +#define PFINT_OICR_VFLR_M BIT(29) +#define PFINT_OICR_CTL 0x0016CA80 +#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) +#define PFINT_OICR_CTL_ITR_INDX_S 11 +#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11) +#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_OICR_ENA 0x0016C900 +#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) +#define QINT_RQCTL_MSIX_INDX_S 0 +#define QINT_RQCTL_ITR_INDX_S 11 +#define QINT_RQCTL_CAUSE_ENA_M BIT(30) +#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) +#define QINT_TQCTL_MSIX_INDX_S 0 +#define QINT_TQCTL_ITR_INDX_S 11 +#define QINT_TQCTL_CAUSE_ENA_M BIT(30) +#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4)) +#define VPINT_ALLOC_FIRST_S 0 +#define VPINT_ALLOC_FIRST_M ICE_M(0x7FF, 0) +#define VPINT_ALLOC_LAST_S 12 +#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12) +#define VPINT_ALLOC_VALID_M BIT(31) +#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) +#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) +#define QRX_CTRL_MAX_INDEX 2047 +#define QRX_CTRL_QENA_REQ_S 0 +#define QRX_CTRL_QENA_REQ_M BIT(0) +#define QRX_CTRL_QENA_STAT_S 2 +#define QRX_CTRL_QENA_STAT_M BIT(2) +#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4)) +#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) +#define 
QRX_TAIL_MAX_INDEX 2047 +#define QRX_TAIL_TAIL_S 0 +#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0) +#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4)) +#define VPLAN_RX_QBASE_VFFIRSTQ_S 0 +#define VPLAN_RX_QBASE_VFFIRSTQ_M ICE_M(0x7FF, 0) +#define VPLAN_RX_QBASE_VFNUMQ_S 16 +#define VPLAN_RX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) +#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4)) +#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0) +#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4)) +#define VPLAN_TX_QBASE_VFFIRSTQ_S 0 +#define VPLAN_TX_QBASE_VFFIRSTQ_M ICE_M(0x3FFF, 0) +#define VPLAN_TX_QBASE_VFNUMQ_S 16 +#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) +#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) +#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) +#define GL_MDET_RX 0x00294C00 +#define GL_MDET_RX_QNUM_S 0 +#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0) +#define GL_MDET_RX_VF_NUM_S 15 +#define GL_MDET_RX_VF_NUM_M ICE_M(0xFF, 15) +#define GL_MDET_RX_PF_NUM_S 23 +#define GL_MDET_RX_PF_NUM_M ICE_M(0x7, 23) +#define GL_MDET_RX_MAL_TYPE_S 26 +#define GL_MDET_RX_MAL_TYPE_M ICE_M(0x1F, 26) +#define GL_MDET_RX_VALID_M BIT(31) +#define GL_MDET_TX_PQM 0x002D2E00 +#define GL_MDET_TX_PQM_PF_NUM_S 0 +#define GL_MDET_TX_PQM_PF_NUM_M ICE_M(0x7, 0) +#define GL_MDET_TX_PQM_VF_NUM_S 4 +#define GL_MDET_TX_PQM_VF_NUM_M ICE_M(0xFF, 4) +#define GL_MDET_TX_PQM_QNUM_S 12 +#define GL_MDET_TX_PQM_QNUM_M ICE_M(0x3FFF, 12) +#define GL_MDET_TX_PQM_MAL_TYPE_S 26 +#define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26) +#define GL_MDET_TX_PQM_VALID_M BIT(31) +#define GL_MDET_TX_TCLAN 0x000FC068 +#define GL_MDET_TX_TCLAN_QNUM_S 0 +#define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0) +#define GL_MDET_TX_TCLAN_VF_NUM_S 15 +#define GL_MDET_TX_TCLAN_VF_NUM_M ICE_M(0xFF, 15) +#define GL_MDET_TX_TCLAN_PF_NUM_S 23 +#define GL_MDET_TX_TCLAN_PF_NUM_M ICE_M(0x7, 23) +#define GL_MDET_TX_TCLAN_MAL_TYPE_S 26 +#define GL_MDET_TX_TCLAN_MAL_TYPE_M ICE_M(0x1F, 26) +#define GL_MDET_TX_TCLAN_VALID_M BIT(31) +#define PF_MDET_RX 0x00294280 +#define PF_MDET_RX_VALID_M BIT(0) +#define PF_MDET_TX_PQM 0x002D2C80 +#define PF_MDET_TX_PQM_VALID_M BIT(0) +#define PF_MDET_TX_TCLAN 0x000FC000 +#define PF_MDET_TX_TCLAN_VALID_M BIT(0) +#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4)) +#define VP_MDET_RX_VALID_M BIT(0) +#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4)) +#define VP_MDET_TX_PQM_VALID_M BIT(0) +#define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4)) +#define VP_MDET_TX_TCLAN_VALID_M BIT(0) +#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4)) +#define VP_MDET_TX_TDPU_VALID_M BIT(0) +#define GLNVM_FLA 0x000B6108 +#define GLNVM_FLA_LOCKED_M BIT(6) +#define GLNVM_GENS 0x000B6100 +#define GLNVM_GENS_SR_SIZE_S 5 +#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5) +#define GLNVM_ULD 0x000B6008 +#define GLNVM_ULD_CORER_DONE_M BIT(3) +#define GLNVM_ULD_GLOBR_DONE_M BIT(4) +#define PF_FUNC_RID 0x0009E880 +#define PF_FUNC_RID_FUNC_NUM_S 0 +#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0) +#define PF_PCI_CIAA 0x0009E580 +#define PF_PCI_CIAA_VF_NUM_S 12 +#define PF_PCI_CIAD 0x0009E500 +#define GL_PWR_MODE_CTL 0x000B820C +#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 +#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) +#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) +#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) +#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) +#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) +#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) +#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) +#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) +#define 
GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) +#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) +#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) +#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) +#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) +#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) +#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) +#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) +#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) +#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) +#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) +#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) +#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) +#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) +#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) +#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) +#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) +#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) +#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) +#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) +#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) +#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) +#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) +#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) +#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) +#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) +#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) +#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) +#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) +#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) +#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) +#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) +#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) +#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) +#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) +#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) +#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) +#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) +#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) +#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) +#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) +#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) +#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) +#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) +#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) +#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) +#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) +#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) +#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) +#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) +#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) +#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) +#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) +#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) +#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) +#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) +#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) +#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) +#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) +#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) +#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) +#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) +#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) +#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) +#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) +#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) +#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) +#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) +#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) +#define 
VSIQF_HKEY_MAX_INDEX 12 +#define VSIQF_HLUT_MAX_INDEX 15 +#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) +#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 068dbc740b76..7d2a66739e3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -188,23 +188,25 @@ struct ice_32b_rx_flex_desc_nic { * with a specific metadata (profile 7 reserved for HW) */ enum ice_rxdid { - ICE_RXDID_START = 0, - ICE_RXDID_LEGACY_0 = ICE_RXDID_START, - ICE_RXDID_LEGACY_1, - ICE_RXDID_FLX_START, - ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START, - ICE_RXDID_FLX_LAST = 63, - ICE_RXDID_LAST = ICE_RXDID_FLX_LAST + ICE_RXDID_LEGACY_0 = 0, + ICE_RXDID_LEGACY_1 = 1, + ICE_RXDID_FLEX_NIC = 2, + ICE_RXDID_FLEX_NIC_2 = 6, + ICE_RXDID_HW = 7, + ICE_RXDID_LAST = 63, }; /* Receive Flex Descriptor Rx opcode values */ #define ICE_RX_OPC_MDID 0x01 /* Receive Descriptor MDID values */ -#define ICE_RX_MDID_FLOW_ID_LOWER 5 -#define ICE_RX_MDID_FLOW_ID_HIGH 6 -#define ICE_RX_MDID_HASH_LOW 56 -#define ICE_RX_MDID_HASH_HIGH 57 +enum ice_flex_rx_mdid { + ICE_RX_MDID_FLOW_ID_LOWER = 5, + ICE_RX_MDID_FLOW_ID_HIGH, + ICE_RX_MDID_SRC_VSI = 19, + ICE_RX_MDID_HASH_LOW = 56, + ICE_RX_MDID_HASH_HIGH, +}; /* Rx Flag64 packet flag bits */ enum ice_rx_flg64_bits { @@ -416,6 +418,7 @@ struct ice_tlan_ctx { u8 pf_num; u16 vmvf_num; u8 vmvf_type; +#define ICE_TLAN_CTX_VMVF_TYPE_VF 0 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1 #define ICE_TLAN_CTX_VMVF_TYPE_PF 2 u16 src_vsi; @@ -471,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) { return ice_ptype_lkup[ptype]; } + +#define ICE_LINK_SPEED_UNKNOWN 0 +#define ICE_LINK_SPEED_10MBPS 10 +#define ICE_LINK_SPEED_100MBPS 100 +#define ICE_LINK_SPEED_1000MBPS 1000 +#define ICE_LINK_SPEED_2500MBPS 2500 +#define ICE_LINK_SPEED_5000MBPS 5000 +#define ICE_LINK_SPEED_10000MBPS 10000 +#define ICE_LINK_SPEED_20000MBPS 20000 +#define ICE_LINK_SPEED_25000MBPS 25000 +#define ICE_LINK_SPEED_40000MBPS 40000 + #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c new file mode 100644 index 000000000000..49f1940772ed --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -0,0 +1,2619 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" + +/** + * ice_setup_rx_ctx - Configure a receive ring context + * @ring: The Rx ring to configure + * + * Configure the Rx descriptor ring in RLAN context. + */ +static int ice_setup_rx_ctx(struct ice_ring *ring) +{ + struct ice_vsi *vsi = ring->vsi; + struct ice_hw *hw = &vsi->back->hw; + u32 rxdid = ICE_RXDID_FLEX_NIC; + struct ice_rlan_ctx rlan_ctx; + u32 regval; + u16 pf_q; + int err; + + /* what is RX queue number in global space of 2K Rx queues */ + pf_q = vsi->rxq_map[ring->q_index]; + + /* clear the context structure first */ + memset(&rlan_ctx, 0, sizeof(rlan_ctx)); + + rlan_ctx.base = ring->dma >> 7; + + rlan_ctx.qlen = ring->count; + + /* Receive Packet Data Buffer Size. + * The Packet Data Buffer Size is defined in 128 byte units. + */ + rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + + /* use 32 byte descriptors */ + rlan_ctx.dsize = 1; + + /* Strip the Ethernet CRC bytes before the packet is posted to host + * memory. 
+ */ + rlan_ctx.crcstrip = 1; + + /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ + rlan_ctx.l2tsel = 1; + + rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; + + /* This controls whether VLAN is stripped from inner headers + * The VLAN in the inner L2 header is stripped to the receive + * descriptor if enabled by this flag. + */ + rlan_ctx.showiv = 0; + + /* Max packet size for this queue - must not be set to a larger value + * than 5 x DBUF + */ + rlan_ctx.rxmax = min_t(u16, vsi->max_frame, + ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); + + /* Rx queue threshold in units of 64 */ + rlan_ctx.lrxqthresh = 1; + + /* Enable Flexible Descriptors in the queue context which + * allows this driver to select a specific receive descriptor format + */ + if (vsi->type != ICE_VSI_VF) { + regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); + regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & + QRXFLXP_CNTXT_RXDID_IDX_M; + + /* increasing context priority to pick up profile id; + * default is 0x01; setting to 0x03 to ensure profile + * is programming if prev context is of same priority + */ + regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & + QRXFLXP_CNTXT_RXDID_PRIO_M; + + wr32(hw, QRXFLXP_CNTXT(pf_q), regval); + } + + /* Absolute queue number out of 2K needs to be passed */ + err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); + if (err) { + dev_err(&vsi->back->pdev->dev, + "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", + pf_q, err); + return -EIO; + } + + if (vsi->type == ICE_VSI_VF) + return 0; + + /* init queue specific tail register */ + ring->tail = hw->hw_addr + QRX_TAIL(pf_q); + writel(0, ring->tail); + ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); + + return 0; +} + +/** + * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance + * @ring: The Tx ring to configure + * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized + * @pf_q: queue index in the PF space + * + * Configure the Tx descriptor ring in TLAN context. 
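As a quick standalone illustration of the two size calculations in ice_setup_rx_ctx() above, the sketch below redoes them in plain C. The 128-byte unit shift and the five-buffer cap are taken from the in-code comments; the frame and buffer sizes are made-up example inputs, not values from the patch.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_DBUF_UNIT_SHIFT	7	/* data buffer size is programmed in 128-byte units */
#define SKETCH_MAX_CHAINED_BUFS	5	/* rxmax must not exceed 5 x DBUF */

int main(void)
{
	uint32_t rx_buf_len = 2048;	/* e.g. ICE_RXBUF_2048 */
	uint32_t max_frame = 1522;	/* example: MTU 1500 plus L2 overhead */
	uint32_t dbuf = rx_buf_len >> SKETCH_DBUF_UNIT_SHIFT;
	uint32_t cap = SKETCH_MAX_CHAINED_BUFS * rx_buf_len;
	uint32_t rxmax = max_frame < cap ? max_frame : cap;

	printf("dbuf = %u (128-byte units), rxmax = %u bytes\n", dbuf, rxmax);
	return 0;
}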
+ */ +static void +ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) +{ + struct ice_vsi *vsi = ring->vsi; + struct ice_hw *hw = &vsi->back->hw; + + tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; + + tlan_ctx->port_num = vsi->port_info->lport; + + /* Transmit Queue Length */ + tlan_ctx->qlen = ring->count; + + /* PF number */ + tlan_ctx->pf_num = hw->pf_id; + + /* queue belongs to a specific VSI type + * VF / VM index should be programmed per vmvf_type setting: + * for vmvf_type = VF, it is VF number between 0-256 + * for vmvf_type = VM, it is VM number between 0-767 + * for PF or EMP this field should be set to zero + */ + switch (vsi->type) { + case ICE_VSI_PF: + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; + break; + case ICE_VSI_VF: + /* Firmware expects vmvf_num to be absolute VF id */ + tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; + break; + default: + return; + } + + /* make sure the context is associated with the right VSI */ + tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); + + tlan_ctx->tso_ena = ICE_TX_LEGACY; + tlan_ctx->tso_qnum = pf_q; + + /* Legacy or Advanced Host Interface: + * 0: Advanced Host Interface + * 1: Legacy Host Interface + */ + tlan_ctx->legacy_int = ICE_TX_LEGACY; +} + +/** + * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled + * @pf: the PF being configured + * @pf_q: the PF queue + * @ena: enable or disable state of the queue + * + * This routine will wait for the given Rx queue of the PF to reach the + * enabled or disabled state. + * Returns -ETIMEDOUT in case of failing to reach the requested state after + * multiple retries; else will return 0 in case of success. + */ +static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) +{ + int i; + + for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { + u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); + + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + break; + + usleep_range(10, 20); + } + if (i >= ICE_Q_WAIT_RETRY_LIMIT) + return -ETIMEDOUT; + + return 0; +} + +/** + * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * @vsi: the VSI being configured + * @ena: start or stop the Rx rings + */ +static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int i, j, ret = 0; + + for (i = 0; i < vsi->num_rxq; i++) { + int pf_q = vsi->rxq_map[i]; + u32 rx_reg; + + for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { + rx_reg = rd32(hw, QRX_CTRL(pf_q)); + if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == + ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) + break; + usleep_range(1000, 2000); + } + + /* Skip if the queue is already in the requested state */ + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + continue; + + /* turn on/off the queue */ + if (ena) + rx_reg |= QRX_CTRL_QENA_REQ_M; + else + rx_reg &= ~QRX_CTRL_QENA_REQ_M; + wr32(hw, QRX_CTRL(pf_q), rx_reg); + + /* wait for the change to finish */ + ret = ice_pf_rxq_wait(pf, pf_q, ena); + if (ret) { + dev_err(&pf->pdev->dev, + "VSI idx %d Rx ring %d %sable timeout\n", + vsi->idx, pf_q, (ena ? "en" : "dis")); + break; + } + } + + return ret; +} + +/** + * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI + * @vsi: VSI pointer + * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 
+ * + * On error: returns error code (negative) + * On success: returns 0 + */ +static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) +{ + struct ice_pf *pf = vsi->back; + + /* allocate memory for both Tx and Rx ring pointers */ + vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + sizeof(struct ice_ring *), GFP_KERNEL); + if (!vsi->tx_rings) + goto err_txrings; + + vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + sizeof(struct ice_ring *), GFP_KERNEL); + if (!vsi->rx_rings) + goto err_rxrings; + + if (alloc_qvectors) { + /* allocate memory for q_vector pointers */ + vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, + vsi->num_q_vectors, + sizeof(struct ice_q_vector *), + GFP_KERNEL); + if (!vsi->q_vectors) + goto err_vectors; + } + + return 0; + +err_vectors: + devm_kfree(&pf->pdev->dev, vsi->rx_rings); +err_rxrings: + devm_kfree(&pf->pdev->dev, vsi->tx_rings); +err_txrings: + return -ENOMEM; +} + +/** + * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + */ +static void ice_vsi_set_num_qs(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + + switch (vsi->type) { + case ICE_VSI_PF: + vsi->alloc_txq = pf->num_lan_tx; + vsi->alloc_rxq = pf->num_lan_rx; + vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); + vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); + break; + case ICE_VSI_VF: + vsi->alloc_txq = pf->num_vf_qps; + vsi->alloc_rxq = pf->num_vf_qps; + /* pf->num_vf_msix includes (VF miscellaneous vector + + * data queue interrupts). Since vsi->num_q_vectors is number + * of queues vectors, subtract 1 from the original vector + * count + */ + vsi->num_q_vectors = pf->num_vf_msix - 1; + break; + default: + dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + break; + } +} + +/** + * ice_get_free_slot - get the next non-NULL location index in array + * @array: array to search + * @size: size of the array + * @curr: last known occupied index to be used as a search hint + * + * void * is being used to keep the functionality generic. This lets us use this + * function on any array of pointers. 
+ */ +static int ice_get_free_slot(void *array, int size, int curr) +{ + int **tmp_array = (int **)array; + int next; + + if (curr < (size - 1) && !tmp_array[curr + 1]) { + next = curr + 1; + } else { + int i = 0; + + while ((i < size) && (tmp_array[i])) + i++; + if (i == size) + next = ICE_NO_VSI; + else + next = i; + } + return next; +} + +/** + * ice_vsi_delete - delete a VSI from the switch + * @vsi: pointer to VSI being removed + */ +void ice_vsi_delete(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_vsi_ctx ctxt; + enum ice_status status; + + if (vsi->type == ICE_VSI_VF) + ctxt.vf_num = vsi->vf_id; + ctxt.vsi_num = vsi->vsi_num; + + memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); + + status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL); + if (status) + dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", + vsi->vsi_num); +} + +/** + * ice_vsi_free_arrays - clean up VSI resources + * @vsi: pointer to VSI being cleared + * @free_qvectors: bool to specify if q_vectors should be deallocated + */ +static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors) +{ + struct ice_pf *pf = vsi->back; + + /* free the ring and vector containers */ + if (free_qvectors && vsi->q_vectors) { + devm_kfree(&pf->pdev->dev, vsi->q_vectors); + vsi->q_vectors = NULL; + } + if (vsi->tx_rings) { + devm_kfree(&pf->pdev->dev, vsi->tx_rings); + vsi->tx_rings = NULL; + } + if (vsi->rx_rings) { + devm_kfree(&pf->pdev->dev, vsi->rx_rings); + vsi->rx_rings = NULL; + } +} + +/** + * ice_vsi_clear - clean up and deallocate the provided VSI + * @vsi: pointer to VSI being cleared + * + * This deallocates the VSI's queue resources, removes it from the PF's + * VSI array if necessary, and deallocates the VSI + * + * Returns 0 on success, negative on failure + */ +int ice_vsi_clear(struct ice_vsi *vsi) +{ + struct ice_pf *pf = NULL; + + if (!vsi) + return 0; + + if (!vsi->back) + return -EINVAL; + + pf = vsi->back; + + if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { + dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", + vsi->idx); + return -EINVAL; + } + + mutex_lock(&pf->sw_mutex); + /* updates the PF for this cleared VSI */ + + pf->vsi[vsi->idx] = NULL; + if (vsi->idx < pf->next_vsi) + pf->next_vsi = vsi->idx; + + ice_vsi_free_arrays(vsi, true); + mutex_unlock(&pf->sw_mutex); + devm_kfree(&pf->pdev->dev, vsi); + + return 0; +} + +/** + * ice_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ice_q_vector *q_vector = (struct ice_q_vector *)data; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ice_vsi_alloc - Allocates the next available struct VSI in the PF + * @pf: board private structure + * @type: type of VSI + * + * returns a pointer to a VSI on success, NULL on failure. + */ +static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) +{ + struct ice_vsi *vsi = NULL; + + /* Need to protect the allocation of the VSIs at the PF level */ + mutex_lock(&pf->sw_mutex); + + /* If we have already allocated our maximum number of VSIs, + * pf->next_vsi will be ICE_NO_VSI. 
If not, pf->next_vsi index + * is available to be populated + */ + if (pf->next_vsi == ICE_NO_VSI) { + dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); + goto unlock_pf; + } + + vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); + if (!vsi) + goto unlock_pf; + + vsi->type = type; + vsi->back = pf; + set_bit(__ICE_DOWN, vsi->state); + vsi->idx = pf->next_vsi; + vsi->work_lmt = ICE_DFLT_IRQ_WORK; + + ice_vsi_set_num_qs(vsi); + + switch (vsi->type) { + case ICE_VSI_PF: + if (ice_vsi_alloc_arrays(vsi, true)) + goto err_rings; + + /* Setup default MSIX irq handler for VSI */ + vsi->irq_handler = ice_msix_clean_rings; + break; + case ICE_VSI_VF: + if (ice_vsi_alloc_arrays(vsi, true)) + goto err_rings; + break; + default: + dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); + goto unlock_pf; + } + + /* fill VSI slot in the PF struct */ + pf->vsi[pf->next_vsi] = vsi; + + /* prepare pf->next_vsi for next use */ + pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, + pf->next_vsi); + goto unlock_pf; + +err_rings: + devm_kfree(&pf->pdev->dev, vsi); + vsi = NULL; +unlock_pf: + mutex_unlock(&pf->sw_mutex); + return vsi; +} + +/** + * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI + * @vsi: the VSI getting queues + * + * Return 0 on success and a negative value on error + */ +static int ice_vsi_get_qs_contig(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int offset, ret = 0; + + mutex_lock(&pf->avail_q_mutex); + /* look for contiguous block of queues for Tx */ + offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS, + 0, vsi->alloc_txq, 0); + if (offset < ICE_MAX_TXQS) { + int i; + + bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq); + for (i = 0; i < vsi->alloc_txq; i++) + vsi->txq_map[i] = i + offset; + } else { + ret = -ENOMEM; + vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER; + } + + /* look for contiguous block of queues for Rx */ + offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS, + 0, vsi->alloc_rxq, 0); + if (offset < ICE_MAX_RXQS) { + int i; + + bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq); + for (i = 0; i < vsi->alloc_rxq; i++) + vsi->rxq_map[i] = i + offset; + } else { + ret = -ENOMEM; + vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER; + } + mutex_unlock(&pf->avail_q_mutex); + + return ret; +} + +/** + * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI + * @vsi: the VSI getting queues + * + * Return 0 on success and a negative value on error + */ +static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int i, index = 0; + + mutex_lock(&pf->avail_q_mutex); + + if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) { + for (i = 0; i < vsi->alloc_txq; i++) { + index = find_next_zero_bit(pf->avail_txqs, + ICE_MAX_TXQS, index); + if (index < ICE_MAX_TXQS) { + set_bit(index, pf->avail_txqs); + vsi->txq_map[i] = index; + } else { + goto err_scatter_tx; + } + } + } + + if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) { + for (i = 0; i < vsi->alloc_rxq; i++) { + index = find_next_zero_bit(pf->avail_rxqs, + ICE_MAX_RXQS, index); + if (index < ICE_MAX_RXQS) { + set_bit(index, pf->avail_rxqs); + vsi->rxq_map[i] = index; + } else { + goto err_scatter_rx; + } + } + } + + mutex_unlock(&pf->avail_q_mutex); + return 0; + +err_scatter_rx: + /* unflag any queues we have grabbed (i is failed position) */ + for (index = 0; index < i; index++) { + clear_bit(vsi->rxq_map[index], pf->avail_rxqs); + vsi->rxq_map[index] = 0; + } + i = vsi->alloc_txq; +err_scatter_tx: + /* i is either position 
of failed attempt or vsi->alloc_txq */ + for (index = 0; index < i; index++) { + clear_bit(vsi->txq_map[index], pf->avail_txqs); + vsi->txq_map[index] = 0; + } + + mutex_unlock(&pf->avail_q_mutex); + return -ENOMEM; +} + +/** + * ice_vsi_get_qs - Assign queues from PF to VSI + * @vsi: the VSI to assign queues to + * + * Returns 0 on success and a negative value on error + */ +static int ice_vsi_get_qs(struct ice_vsi *vsi) +{ + int ret = 0; + + vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; + vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; + + /* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping + * modes individually to scatter if assigning contiguous queues + * to Rx or Tx fails + */ + ret = ice_vsi_get_qs_contig(vsi); + if (ret < 0) { + if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) + vsi->alloc_txq = max_t(u16, vsi->alloc_txq, + ICE_MAX_SCATTER_TXQS); + if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) + vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq, + ICE_MAX_SCATTER_RXQS); + ret = ice_vsi_get_qs_scatter(vsi); + } + + return ret; +} + +/** + * ice_vsi_put_qs - Release queues from VSI to PF + * @vsi: the VSI that is going to release queues + */ +void ice_vsi_put_qs(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int i; + + mutex_lock(&pf->avail_q_mutex); + + for (i = 0; i < vsi->alloc_txq; i++) { + clear_bit(vsi->txq_map[i], pf->avail_txqs); + vsi->txq_map[i] = ICE_INVAL_Q_INDEX; + } + + for (i = 0; i < vsi->alloc_rxq; i++) { + clear_bit(vsi->rxq_map[i], pf->avail_rxqs); + vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; + } + + mutex_unlock(&pf->avail_q_mutex); +} + +/** + * ice_rss_clean - Delete RSS related VSI structures that hold user inputs + * @vsi: the VSI being removed + */ +static void ice_rss_clean(struct ice_vsi *vsi) +{ + struct ice_pf *pf; + + pf = vsi->back; + + if (vsi->rss_hkey_user) + devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); + if (vsi->rss_lut_user) + devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); +} + +/** + * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type + * @vsi: the VSI being configured + */ +static void ice_vsi_set_rss_params(struct ice_vsi *vsi) +{ + struct ice_hw_common_caps *cap; + struct ice_pf *pf = vsi->back; + + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + vsi->rss_size = 1; + return; + } + + cap = &pf->hw.func_caps.common_cap; + switch (vsi->type) { + case ICE_VSI_PF: + /* PF VSI will inherit RSS instance of PF */ + vsi->rss_table_size = cap->rss_table_size; + vsi->rss_size = min_t(int, num_online_cpus(), + BIT(cap->rss_table_entry_width)); + vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; + break; + case ICE_VSI_VF: + /* VF VSI will gets a small RSS table + * For VSI_LUT, LUT size should be set to 64 bytes + */ + vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vsi->rss_size = min_t(int, num_online_cpus(), + BIT(cap->rss_table_entry_width)); + vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; + break; + default: + dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + break; + } +} + +/** + * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI + * @ctxt: the VSI context being set + * + * This initializes a default VSI context for all sections except the Queues. 
+ */ +static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) +{ + u32 table = 0; + + memset(&ctxt->info, 0, sizeof(ctxt->info)); + /* VSI's should be allocated from shared pool */ + ctxt->alloc_from_pool = true; + /* Src pruning enabled by default */ + ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; + /* Traffic from VSI can be sent to LAN */ + ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; + /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy + * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all + * packets untagged/tagged. + */ + ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & + ICE_AQ_VSI_VLAN_MODE_M) >> + ICE_AQ_VSI_VLAN_MODE_S); + /* Have 1:1 UP mapping for both ingress/egress tables */ + table |= ICE_UP_TABLE_TRANSLATE(0, 0); + table |= ICE_UP_TABLE_TRANSLATE(1, 1); + table |= ICE_UP_TABLE_TRANSLATE(2, 2); + table |= ICE_UP_TABLE_TRANSLATE(3, 3); + table |= ICE_UP_TABLE_TRANSLATE(4, 4); + table |= ICE_UP_TABLE_TRANSLATE(5, 5); + table |= ICE_UP_TABLE_TRANSLATE(6, 6); + table |= ICE_UP_TABLE_TRANSLATE(7, 7); + ctxt->info.ingress_table = cpu_to_le32(table); + ctxt->info.egress_table = cpu_to_le32(table); + /* Have 1:1 UP mapping for outer to inner UP table */ + ctxt->info.outer_up_table = cpu_to_le32(table); + /* No Outer tag support outer_tag_flags remains to zero */ +} + +/** + * ice_vsi_setup_q_map - Setup a VSI queue map + * @vsi: the VSI being configured + * @ctxt: VSI context structure + */ +static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) +{ + u16 offset = 0, qmap = 0, numq_tc; + u16 pow = 0, max_rss = 0, qcount; + u16 qcount_tx = vsi->alloc_txq; + u16 qcount_rx = vsi->alloc_rxq; + bool ena_tc0 = false; + int i; + + /* at least TC0 should be enabled by default */ + if (vsi->tc_cfg.numtc) { + if (!(vsi->tc_cfg.ena_tc & BIT(0))) + ena_tc0 = true; + } else { + ena_tc0 = true; + } + + if (ena_tc0) { + vsi->tc_cfg.numtc++; + vsi->tc_cfg.ena_tc |= 1; + } + + numq_tc = qcount_rx / vsi->tc_cfg.numtc; + + /* TC mapping is a function of the number of Rx queues assigned to the + * VSI for each traffic class and the offset of these queues. + * The first 10 bits are for queue offset for TC0, next 4 bits for no:of + * queues allocated to TC0. No:of queues is a power-of-2. + * + * If TC is not enabled, the queue offset is set to 0, and allocate one + * queue, this way, traffic for the given TC will be sent to the default + * queue. 
+ * + * Setup number and offset of Rx queues for all TCs for the VSI + */ + + qcount = numq_tc; + /* qcount will change if RSS is enabled */ + if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { + if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) { + if (vsi->type == ICE_VSI_PF) + max_rss = ICE_MAX_LG_RSS_QS; + else + max_rss = ICE_MAX_SMALL_RSS_QS; + qcount = min_t(int, numq_tc, max_rss); + qcount = min_t(int, qcount, vsi->rss_size); + } + } + + /* find the (rounded up) power-of-2 of qcount */ + pow = order_base_2(qcount); + + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->tc_cfg.ena_tc & BIT(i))) { + /* TC is not enabled */ + vsi->tc_cfg.tc_info[i].qoffset = 0; + vsi->tc_cfg.tc_info[i].qcount = 1; + ctxt->info.tc_mapping[i] = 0; + continue; + } + + /* TC is enabled */ + vsi->tc_cfg.tc_info[i].qoffset = offset; + vsi->tc_cfg.tc_info[i].qcount = qcount; + + qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & + ICE_AQ_VSI_TC_Q_OFFSET_M) | + ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & + ICE_AQ_VSI_TC_Q_NUM_M); + offset += qcount; + ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); + } + + vsi->num_txq = qcount_tx; + vsi->num_rxq = offset; + + if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { + dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); + /* since there is a chance that num_rxq could have been changed + * in the above for loop, make num_txq equal to num_rxq. + */ + vsi->num_txq = vsi->num_rxq; + } + + /* Rx queue mapping */ + ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); + /* q_mapping buffer holds the info for the first queue allocated for + * this VSI in the PF space and also the number of queues associated + * with this VSI. + */ + ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); + ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); +} + +/** + * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI + * @ctxt: the VSI context being set + * @vsi: the VSI being configured + */ +static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) +{ + u8 lut_type, hash_type; + + switch (vsi->type) { + case ICE_VSI_PF: + /* PF VSI will inherit RSS instance of PF */ + lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; + hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + break; + case ICE_VSI_VF: + /* VF VSI will gets a small RSS table which is a VSI LUT type */ + lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; + hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + break; + default: + dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + return; + } + + ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & + ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | + ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & + ICE_AQ_VSI_Q_OPT_RSS_HASH_M); +} + +/** + * ice_vsi_init - Create and initialize a VSI + * @vsi: the VSI being configured + * + * This initializes a VSI context depending on the VSI type to be added and + * passes it down to the add_vsi aq command to create a new VSI. 
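The queue-map packing in ice_vsi_setup_q_map() above can be hard to picture from the shifts and masks alone. The sketch below packs a single TC entry the way its comment describes: a queue offset in the low bits and a 4-bit power-of-two queue count above it. The 10-bit offset width and the shift value are assumptions drawn from that comment rather than the driver's ICE_AQ_VSI_TC_Q_* constants, and the queue counts are example inputs.

#include <stdint.h>
#include <stdio.h>

/* rounded-up power-of-two order, similar in spirit to order_base_2() */
static unsigned int pow2_order(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int offset = 0;		/* first queue of this TC */
	unsigned int qcount = 12;		/* 12 Rx queues assigned to the TC */
	unsigned int pow = pow2_order(qcount);	/* 4, i.e. 16 queue slots */
	unsigned int q_num_shift = 10;		/* assumed: offset occupies the low 10 bits */
	uint16_t qmap = (uint16_t)((offset & 0x3FF) | (pow << q_num_shift));

	printf("qcount=%u pow=%u qmap=0x%04x\n", qcount, pow, qmap);
	return 0;
}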
+ */ +static int ice_vsi_init(struct ice_vsi *vsi) +{ + struct ice_vsi_ctx ctxt = { 0 }; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int ret = 0; + + switch (vsi->type) { + case ICE_VSI_PF: + ctxt.flags = ICE_AQ_VSI_TYPE_PF; + break; + case ICE_VSI_VF: + ctxt.flags = ICE_AQ_VSI_TYPE_VF; + /* VF number here is the absolute VF number (0-255) */ + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; + break; + default: + return -ENODEV; + } + + ice_set_dflt_vsi_ctx(&ctxt); + /* if the switch is in VEB mode, allow VSI loopback */ + if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) + ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + + /* Set LUT type and HASH type if RSS is enabled */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_set_rss_vsi_ctx(&ctxt, vsi); + + ctxt.info.sw_id = vsi->port_info->sw_id; + ice_vsi_setup_q_map(vsi, &ctxt); + + ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL); + if (ret) { + dev_err(&pf->pdev->dev, + "Add VSI failed, err %d\n", ret); + return -EIO; + } + + /* keep context for update VSI operations */ + vsi->info = ctxt.info; + + /* record VSI number returned */ + vsi->vsi_num = ctxt.vsi_num; + + return ret; +} + +/** + * ice_free_q_vector - Free memory allocated for a specific interrupt vector + * @vsi: VSI having the memory freed + * @v_idx: index of the vector to be freed + */ +static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) +{ + struct ice_q_vector *q_vector; + struct ice_ring *ring; + + if (!vsi->q_vectors[v_idx]) { + dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n", + v_idx); + return; + } + q_vector = vsi->q_vectors[v_idx]; + + ice_for_each_ring(ring, q_vector->tx) + ring->q_vector = NULL; + ice_for_each_ring(ring, q_vector->rx) + ring->q_vector = NULL; + + /* only VSI with an associated netdev is set up with NAPI */ + if (vsi->netdev) + netif_napi_del(&q_vector->napi); + + devm_kfree(&vsi->back->pdev->dev, q_vector); + vsi->q_vectors[v_idx] = NULL; +} + +/** + * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors + * @vsi: the VSI having memory freed + */ +void ice_vsi_free_q_vectors(struct ice_vsi *vsi) +{ + int v_idx; + + for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + ice_free_q_vector(vsi, v_idx); +} + +/** + * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector + * @vsi: the VSI being configured + * @v_idx: index of the vector in the VSI struct + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) +{ + struct ice_pf *pf = vsi->back; + struct ice_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->vsi = vsi; + q_vector->v_idx = v_idx; + if (vsi->type == ICE_VSI_VF) + goto out; + /* only set affinity_mask if the CPU is online */ + if (cpu_online(v_idx)) + cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + + /* This will not be called in the driver load path because the netdev + * will not be created yet. All other cases with register the NAPI + * handler here (i.e. resume, reset/rebuild, etc.) 
+ */ + if (vsi->netdev) + netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, + NAPI_POLL_WEIGHT); + +out: + /* tie q_vector and VSI together */ + vsi->q_vectors[v_idx] = q_vector; + + return 0; +} + +/** + * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors + * @vsi: the VSI being configured + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int v_idx = 0, num_q_vectors; + int err; + + if (vsi->q_vectors[0]) { + dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", + vsi->vsi_num); + return -EEXIST; + } + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { + num_q_vectors = vsi->num_q_vectors; + } else { + err = -EINVAL; + goto err_out; + } + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = ice_vsi_alloc_q_vector(vsi, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + ice_free_q_vector(vsi, v_idx); + + dev_err(&pf->pdev->dev, + "Failed to allocate %d q_vector for VSI %d, ret=%d\n", + vsi->num_q_vectors, vsi->vsi_num, err); + vsi->num_q_vectors = 0; + return err; +} + +/** + * ice_vsi_setup_vector_base - Set up the base vector for the given VSI + * @vsi: ptr to the VSI + * + * This should only be called after ice_vsi_alloc() which allocates the + * corresponding SW VSI structure and initializes num_queue_pairs for the + * newly allocated VSI. + * + * Returns 0 on success or negative on failure + */ +static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int num_q_vectors = 0; + + if (vsi->sw_base_vector || vsi->hw_base_vector) { + dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n", + vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector); + return -EEXIST; + } + + if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + return -ENOENT; + + switch (vsi->type) { + case ICE_VSI_PF: + num_q_vectors = vsi->num_q_vectors; + /* reserve slots from OS requested IRQs */ + vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker, + num_q_vectors, vsi->idx); + if (vsi->sw_base_vector < 0) { + dev_err(&pf->pdev->dev, + "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n", + num_q_vectors, vsi->vsi_num, + vsi->sw_base_vector); + return -ENOENT; + } + pf->num_avail_sw_msix -= num_q_vectors; + + /* reserve slots from HW interrupts */ + vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, + num_q_vectors, vsi->idx); + break; + case ICE_VSI_VF: + /* take VF misc vector and data vectors into account */ + num_q_vectors = pf->num_vf_msix; + /* For VF VSI, reserve slots only from HW interrupts */ + vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, + num_q_vectors, vsi->idx); + break; + default: + dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", + vsi->type); + break; + } + + if (vsi->hw_base_vector < 0) { + dev_err(&pf->pdev->dev, + "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n", + num_q_vectors, vsi->vsi_num, vsi->hw_base_vector); + if (vsi->type != ICE_VSI_VF) { + ice_free_res(vsi->back->sw_irq_tracker, + vsi->sw_base_vector, vsi->idx); + pf->num_avail_sw_msix += num_q_vectors; + } + return -ENOENT; + } + + pf->num_avail_hw_msix -= num_q_vectors; + + return 0; +} + +/** + * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI + * @vsi: the VSI having rings deallocated + */ +static void ice_vsi_clear_rings(struct ice_vsi *vsi) +{ + int i; + + if (vsi->tx_rings) { + for (i = 0; 
i < vsi->alloc_txq; i++) { + if (vsi->tx_rings[i]) { + kfree_rcu(vsi->tx_rings[i], rcu); + vsi->tx_rings[i] = NULL; + } + } + } + if (vsi->rx_rings) { + for (i = 0; i < vsi->alloc_rxq; i++) { + if (vsi->rx_rings[i]) { + kfree_rcu(vsi->rx_rings[i], rcu); + vsi->rx_rings[i] = NULL; + } + } + } +} + +/** + * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI + * @vsi: VSI which is having rings allocated + */ +static int ice_vsi_alloc_rings(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int i; + + /* Allocate tx_rings */ + for (i = 0; i < vsi->alloc_txq; i++) { + struct ice_ring *ring; + + /* allocate with kzalloc(), free with kfree_rcu() */ + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + + if (!ring) + goto err_out; + + ring->q_index = i; + ring->reg_idx = vsi->txq_map[i]; + ring->ring_active = false; + ring->vsi = vsi; + ring->dev = &pf->pdev->dev; + ring->count = vsi->num_desc; + vsi->tx_rings[i] = ring; + } + + /* Allocate rx_rings */ + for (i = 0; i < vsi->alloc_rxq; i++) { + struct ice_ring *ring; + + /* allocate with kzalloc(), free with kfree_rcu() */ + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_out; + + ring->q_index = i; + ring->reg_idx = vsi->rxq_map[i]; + ring->ring_active = false; + ring->vsi = vsi; + ring->netdev = vsi->netdev; + ring->dev = &pf->pdev->dev; + ring->count = vsi->num_desc; + vsi->rx_rings[i] = ring; + } + + return 0; + +err_out: + ice_vsi_clear_rings(vsi); + return -ENOMEM; +} + +/** + * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors + * @vsi: the VSI being configured + * + * This function maps descriptor rings to the queue-specific vectors allotted + * through the MSI-X enabling code. On a constrained vector budget, we map Tx + * and Rx rings to the vector as "efficiently" as possible. 
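The "as efficiently as possible" mapping that ice_vsi_map_rings_to_vectors() below implements boils down to the DIV_ROUND_UP() split shown in this standalone sketch of the arithmetic, using example counts; the real function additionally links each ring into its vector's ring list.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 3;	/* example: 3 MSI-X vectors */
	int rings_rem = 5;	/* example: 5 Tx rings to spread over them */
	int v_id;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		int per_v = DIV_ROUND_UP(rings_rem, q_vectors - v_id);

		printf("vector %d gets %d ring(s)\n", v_id, per_v);
		rings_rem -= per_v;
	}
	return 0;	/* prints 2 / 2 / 1 for the example above */
}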
+ */ +static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) +{ + int q_vectors = vsi->num_q_vectors; + int tx_rings_rem, rx_rings_rem; + int v_id; + + /* initially assigning remaining rings count to VSIs num queue value */ + tx_rings_rem = vsi->num_txq; + rx_rings_rem = vsi->num_rxq; + + for (v_id = 0; v_id < q_vectors; v_id++) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; + int tx_rings_per_v, rx_rings_per_v, q_id, q_base; + + /* Tx rings mapping to vector */ + tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); + q_vector->num_ring_tx = tx_rings_per_v; + q_vector->tx.ring = NULL; + q_vector->tx.itr_idx = ICE_TX_ITR; + q_base = vsi->num_txq - tx_rings_rem; + + for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { + struct ice_ring *tx_ring = vsi->tx_rings[q_id]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + } + tx_rings_rem -= tx_rings_per_v; + + /* Rx rings mapping to vector */ + rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); + q_vector->num_ring_rx = rx_rings_per_v; + q_vector->rx.ring = NULL; + q_vector->rx.itr_idx = ICE_RX_ITR; + q_base = vsi->num_rxq - rx_rings_rem; + + for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { + struct ice_ring *rx_ring = vsi->rx_rings[q_id]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + } + rx_rings_rem -= rx_rings_per_v; + } +} + +/** + * ice_vsi_manage_rss_lut - disable/enable RSS + * @vsi: the VSI being changed + * @ena: boolean value indicating if this is an enable or disable request + * + * In the event of disable request for RSS, this function will zero out RSS + * LUT, while in the event of enable request for RSS, it will reconfigure RSS + * LUT. 
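The RSS re-enable path described here relies on ice_fill_rss_lut() to repopulate the table; that helper is not part of this file. The sketch below shows one plausible round-robin fill that spreads hash buckets over the active Rx queues, which is assumed to be roughly what the driver does; table and queue counts are example values.

#include <stdio.h>

static void fill_rss_lut(unsigned char *lut, int lut_size, int rss_size)
{
	int i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;	/* Rx queue index for this hash bucket */
}

int main(void)
{
	unsigned char lut[64];	/* 64-byte VSI LUT, as for a VF VSI above */
	int i;

	fill_rss_lut(lut, sizeof(lut), 4);	/* 4 active Rx queues */
	for (i = 0; i < 8; i++)
		printf("lut[%d] = %u\n", i, lut[i]);
	return 0;
}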
+ */ +int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) +{ + int err = 0; + u8 *lut; + + lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size, + GFP_KERNEL); + if (!lut) + return -ENOMEM; + + if (ena) { + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); + else + ice_fill_rss_lut(lut, vsi->rss_table_size, + vsi->rss_size); + } + + err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size); + devm_kfree(&vsi->back->pdev->dev, lut); + return err; +} + +/** + * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI + * @vsi: VSI to be configured + */ +static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) +{ + u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; + struct ice_aqc_get_set_rss_keys *key; + struct ice_pf *pf = vsi->back; + enum ice_status status; + int err = 0; + u8 *lut; + + vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); + + lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); + else + ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); + + status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut, + vsi->rss_table_size); + + if (status) { + dev_err(&vsi->back->pdev->dev, + "set_rss_lut failed, error %d\n", status); + err = -EIO; + goto ice_vsi_cfg_rss_exit; + } + + key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL); + if (!key) { + err = -ENOMEM; + goto ice_vsi_cfg_rss_exit; + } + + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + else + netdev_rss_key_fill((void *)seed, + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + memcpy(&key->standard_rss_key, seed, + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + + status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key); + + if (status) { + dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n", + status); + err = -EIO; + } + + devm_kfree(&pf->pdev->dev, key); +ice_vsi_cfg_rss_exit: + devm_kfree(&pf->pdev->dev, lut); + return err; +} + +/** + * ice_add_mac_to_list - Add a mac address filter entry to the list + * @vsi: the VSI to be forwarded to + * @add_list: pointer to the list which contains MAC filter entries + * @macaddr: the MAC address to be added. + * + * Adds mac address filter entry to the temp list + * + * Returns 0 on success or ENOMEM on failure. 
+ */ +int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, + const u8 *macaddr) +{ + struct ice_fltr_list_entry *tmp; + struct ice_pf *pf = vsi->back; + + tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + + tmp->fltr_info.flag = ICE_FLTR_TX; + tmp->fltr_info.src_id = ICE_SRC_ID_VSI; + tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.vsi_handle = vsi->idx; + ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); + + INIT_LIST_HEAD(&tmp->list_entry); + list_add(&tmp->list_entry, add_list); + + return 0; +} + +/** + * ice_update_eth_stats - Update VSI-specific ethernet statistics counters + * @vsi: the VSI to be updated + */ +void ice_update_eth_stats(struct ice_vsi *vsi) +{ + struct ice_eth_stats *prev_es, *cur_es; + struct ice_hw *hw = &vsi->back->hw; + u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ + + prev_es = &vsi->eth_stats_prev; + cur_es = &vsi->eth_stats; + + ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_bytes, + &cur_es->rx_bytes); + + ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_unicast, + &cur_es->rx_unicast); + + ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_multicast, + &cur_es->rx_multicast); + + ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->rx_broadcast, + &cur_es->rx_broadcast); + + ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_discards, &cur_es->rx_discards); + + ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_bytes, + &cur_es->tx_bytes); + + ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_unicast, + &cur_es->tx_unicast); + + ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_multicast, + &cur_es->tx_multicast); + + ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), + vsi->stat_offsets_loaded, &prev_es->tx_broadcast, + &cur_es->tx_broadcast); + + ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_errors, &cur_es->tx_errors); + + vsi->stat_offsets_loaded = true; +} + +/** + * ice_free_fltr_list - free filter lists helper + * @dev: pointer to the device struct + * @h: pointer to the list head to be freed + * + * Helper function to free filter lists previously created using + * ice_add_mac_to_list + */ +void ice_free_fltr_list(struct device *dev, struct list_head *h) +{ + struct ice_fltr_list_entry *e, *tmp; + + list_for_each_entry_safe(e, tmp, h, list_entry) { + list_del(&e->list_entry); + devm_kfree(dev, e); + } +} + +/** + * ice_vsi_add_vlan - Add VSI membership for given VLAN + * @vsi: the VSI being configured + * @vid: VLAN id to be added + */ +int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) +{ + struct ice_fltr_list_entry *tmp; + struct ice_pf *pf = vsi->back; + LIST_HEAD(tmp_add_list); + enum ice_status status; + int err = 0; + + tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.flag = ICE_FLTR_TX; + tmp->fltr_info.src_id = ICE_SRC_ID_VSI; + tmp->fltr_info.vsi_handle = vsi->idx; + tmp->fltr_info.l_data.vlan.vlan_id = vid; + 
+ INIT_LIST_HEAD(&tmp->list_entry); + list_add(&tmp->list_entry, &tmp_add_list); + + status = ice_add_vlan(&pf->hw, &tmp_add_list); + if (status) { + err = -ENODEV; + dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", + vid, vsi->vsi_num); + } + + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return err; +} + +/** + * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN + * @vsi: the VSI being configured + * @vid: VLAN id to be removed + * + * Returns 0 on success and negative on failure + */ +int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) +{ + struct ice_fltr_list_entry *list; + struct ice_pf *pf = vsi->back; + LIST_HEAD(tmp_add_list); + int status = 0; + + list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); + if (!list) + return -ENOMEM; + + list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; + list->fltr_info.vsi_handle = vsi->idx; + list->fltr_info.fltr_act = ICE_FWD_TO_VSI; + list->fltr_info.l_data.vlan.vlan_id = vid; + list->fltr_info.flag = ICE_FLTR_TX; + list->fltr_info.src_id = ICE_SRC_ID_VSI; + + INIT_LIST_HEAD(&list->list_entry); + list_add(&list->list_entry, &tmp_add_list); + + if (ice_remove_vlan(&pf->hw, &tmp_add_list)) { + dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", + vid, vsi->vsi_num); + status = -EIO; + } + + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return status; +} + +/** + * ice_vsi_cfg_rxqs - Configure the VSI for Rx + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Rx VSI for operation. + */ +int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) +{ + int err = 0; + u16 i; + + if (vsi->type == ICE_VSI_VF) + goto setup_rings; + + if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) + vsi->max_frame = vsi->netdev->mtu + + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + else + vsi->max_frame = ICE_RXBUF_2048; + + vsi->rx_buf_len = ICE_RXBUF_2048; +setup_rings: + /* set up individual rings */ + for (i = 0; i < vsi->num_rxq && !err; i++) + err = ice_setup_rx_ctx(vsi->rx_rings[i]); + + if (err) { + dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); + return -EIO; + } + return err; +} + +/** + * ice_vsi_cfg_txqs - Configure the VSI for Tx + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Tx VSI for operation. + */ +int ice_vsi_cfg_txqs(struct ice_vsi *vsi) +{ + struct ice_aqc_add_tx_qgrp *qg_buf; + struct ice_aqc_add_txqs_perq *txq; + struct ice_pf *pf = vsi->back; + enum ice_status status; + u16 buf_len, i, pf_q; + int err = 0, tc = 0; + u8 num_q_grps; + + buf_len = sizeof(struct ice_aqc_add_tx_qgrp); + qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); + if (!qg_buf) + return -ENOMEM; + + if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { + err = -EINVAL; + goto err_cfg_txqs; + } + qg_buf->num_txqs = 1; + num_q_grps = 1; + + /* set up and configure the Tx queues */ + ice_for_each_txq(vsi, i) { + struct ice_tlan_ctx tlan_ctx = { 0 }; + + pf_q = vsi->txq_map[i]; + ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); + /* copy context contents into the qg_buf */ + qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); + ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, + ice_tlan_ctx_info); + + /* init queue specific tail reg. It is referred as transmit + * comm scheduler queue doorbell. 
+ */ + vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); + status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, + num_q_grps, qg_buf, buf_len, NULL); + if (status) { + dev_err(&vsi->back->pdev->dev, + "Failed to set LAN Tx queue context, error: %d\n", + status); + err = -ENODEV; + goto err_cfg_txqs; + } + + /* Add Tx Queue TEID into the VSI Tx ring from the response + * This will complete configuring and enabling the queue. + */ + txq = &qg_buf->txqs[0]; + if (pf_q == le16_to_cpu(txq->txq_id)) + vsi->tx_rings[i]->txq_teid = + le32_to_cpu(txq->q_teid); + } +err_cfg_txqs: + devm_kfree(&pf->pdev->dev, qg_buf); + return err; +} + +/** + * ice_intrl_usec_to_reg - convert interrupt rate limit to register value + * @intrl: interrupt rate limit in usecs + * @gran: interrupt rate limit granularity in usecs + * + * This function converts a decimal interrupt rate limit in usecs to the format + * expected by firmware. + */ +static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) +{ + u32 val = intrl / gran; + + if (val) + return val | GLINT_RATE_INTRL_ENA_M; + return 0; +} + +/** + * ice_cfg_itr - configure the initial interrupt throttle values + * @hw: pointer to the HW structure + * @q_vector: interrupt vector that's being configured + * @vector: HW vector index to apply the interrupt throttling to + * + * Configure interrupt throttling values for the ring containers that are + * associated with the interrupt vector passed in. + */ +static void +ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) +{ + u8 itr_gran = hw->itr_gran; + + if (q_vector->num_ring_rx) { + struct ice_ring_container *rc = &q_vector->rx; + + rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran); + rc->latency_range = ICE_LOW_LATENCY; + wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); + } + + if (q_vector->num_ring_tx) { + struct ice_ring_container *rc = &q_vector->tx; + + rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran); + rc->latency_range = ICE_LOW_LATENCY; + wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); + } +} + +/** + * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW + * @vsi: the VSI being configured + */ +void ice_vsi_cfg_msix(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + u16 vector = vsi->hw_base_vector; + struct ice_hw *hw = &pf->hw; + u32 txq = 0, rxq = 0; + int i, q; + + for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + ice_cfg_itr(hw, q_vector, vector); + + wr32(hw, GLINT_RATE(vector), + ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); + + /* Both Transmit Queue Interrupt Cause Control register + * and Receive Queue Interrupt Cause control register + * expects MSIX_INDX field to be the vector index + * within the function space and not the absolute + * vector index across PF or across device. + * For SR-IOV VF VSIs queue vector index always starts + * with 1 since first vector index(0) is used for OICR + * in VF space. Since VMDq and other PF VSIs are within + * the PF function space, use the vector index that is + * tracked for this PF. 
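To make the per-queue cause-control writes that follow easier to read, here is a standalone recomputation of one QINT_TQCTL value for a PF VSI, reusing the shift and bit definitions from ice_hw_autogen.h earlier in this patch; the vector and ITR indices are example inputs only.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define QINT_TQCTL_MSIX_INDX_S	0
#define QINT_TQCTL_ITR_INDX_S	11
#define QINT_TQCTL_CAUSE_ENA_M	BIT(30)

int main(void)
{
	uint32_t vector = 5;	/* example: vector index within the PF's function space */
	uint32_t itr_idx = 1;	/* example: Tx ITR index */
	uint32_t val = QINT_TQCTL_CAUSE_ENA_M |
		       (itr_idx << QINT_TQCTL_ITR_INDX_S) |
		       (vector << QINT_TQCTL_MSIX_INDX_S);

	printf("QINT_TQCTL = 0x%08x\n", val);
	return 0;
}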
+ */ + for (q = 0; q < q_vector->num_ring_tx; q++) { + int itr_idx = q_vector->tx.itr_idx; + u32 val; + + if (vsi->type == ICE_VSI_VF) + val = QINT_TQCTL_CAUSE_ENA_M | + (itr_idx << QINT_TQCTL_ITR_INDX_S) | + ((i + 1) << QINT_TQCTL_MSIX_INDX_S); + else + val = QINT_TQCTL_CAUSE_ENA_M | + (itr_idx << QINT_TQCTL_ITR_INDX_S) | + (vector << QINT_TQCTL_MSIX_INDX_S); + wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); + txq++; + } + + for (q = 0; q < q_vector->num_ring_rx; q++) { + int itr_idx = q_vector->rx.itr_idx; + u32 val; + + if (vsi->type == ICE_VSI_VF) + val = QINT_RQCTL_CAUSE_ENA_M | + (itr_idx << QINT_RQCTL_ITR_INDX_S) | + ((i + 1) << QINT_RQCTL_MSIX_INDX_S); + else + val = QINT_RQCTL_CAUSE_ENA_M | + (itr_idx << QINT_RQCTL_ITR_INDX_S) | + (vector << QINT_RQCTL_MSIX_INDX_S); + wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); + rxq++; + } + } + + ice_flush(hw); +} + +/** + * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx + * @vsi: the VSI being changed + */ +int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + /* Here we are configuring the VSI to let the driver add VLAN tags by + * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag + * insertion happens in the Tx hot path, in ice_tx_map. + */ + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; + + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + + status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (status) { + dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + return -EIO; + } + + vsi->info.vlan_flags = ctxt.info.vlan_flags; + return 0; +} + +/** + * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx + * @vsi: the VSI being changed + * @ena: boolean value indicating if this is a enable or disable request + */ +int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + /* Here we are configuring what the VSI should do with the VLAN tag in + * the Rx packet. We can either leave the tag in the packet or put it in + * the Rx descriptor. + */ + if (ena) { + /* Strip VLAN tag from Rx packet and put it in the desc */ + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; + } else { + /* Disable stripping. 
Leave tag in packet */ + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; + } + + /* Allow all packets untagged/tagged */ + ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; + + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + + status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (status) { + dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", + ena, status, hw->adminq.sq_last_status); + return -EIO; + } + + vsi->info.vlan_flags = ctxt.info.vlan_flags; + return 0; +} + +/** + * ice_vsi_start_rx_rings - start VSI's Rx rings + * @vsi: the VSI whose rings are to be started + * + * Returns 0 on success and a negative value on error + */ +int ice_vsi_start_rx_rings(struct ice_vsi *vsi) +{ + return ice_vsi_ctrl_rx_rings(vsi, true); +} + +/** + * ice_vsi_stop_rx_rings - stop VSI's Rx rings + * @vsi: the VSI + * + * Returns 0 on success and a negative value on error + */ +int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) +{ + return ice_vsi_ctrl_rx_rings(vsi, false); +} + +/** + * ice_vsi_stop_tx_rings - Disable Tx rings + * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative id of VF/VM + */ +int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u32 *q_teids, val; + u16 *q_ids, i; + int err = 0; + + if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) + return -EINVAL; + + q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), + GFP_KERNEL); + if (!q_teids) + return -ENOMEM; + + q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), + GFP_KERNEL); + if (!q_ids) { + err = -ENOMEM; + goto err_alloc_q_ids; + } + + /* set up the Tx queue list to be disabled */ + ice_for_each_txq(vsi, i) { + u16 v_idx; + + if (!vsi->tx_rings || !vsi->tx_rings[i]) { + err = -EINVAL; + goto err_out; + } + + q_ids[i] = vsi->txq_map[i]; + q_teids[i] = vsi->tx_rings[i]->txq_teid; + + /* clear cause_ena bit for disabled queues */ + val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); + + /* software is expected to wait for 100 ns */ + ndelay(100); + + /* trigger a software interrupt for the vector associated to + * the queue to schedule NAPI handler + */ + v_idx = vsi->tx_rings[i]->q_vector->v_idx; + wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), + GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); + } + status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, + rst_src, rel_vmvf_num, NULL); + /* if the disable queue command was exercised during an active reset + * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as + * the reset operation disables queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_info(&pf->pdev->dev, + "Reset in progress. 
LAN Tx queues already disabled\n"); + } else if (status) { + dev_err(&pf->pdev->dev, + "Failed to disable LAN Tx queues, error: %d\n", + status); + err = -ENODEV; + } + +err_out: + devm_kfree(&pf->pdev->dev, q_ids); + +err_alloc_q_ids: + devm_kfree(&pf->pdev->dev, q_teids); + + return err; +} + +/** + * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI + * @vsi: VSI to enable or disable VLAN pruning on + * @ena: set to true to enable VLAN pruning and false to disable it + * + * returns 0 if VSI is updated, negative otherwise + */ +int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) +{ + struct ice_vsi_ctx *ctxt; + struct device *dev; + int status; + + if (!vsi) + return -EINVAL; + + dev = &vsi->back->pdev->dev; + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ctxt->info = vsi->info; + + if (ena) { + ctxt->info.sec_flags |= + ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; + ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } else { + ctxt->info.sec_flags &= + ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } + + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + + status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); + if (status) { + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", + ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status, + vsi->back->hw.adminq.sq_last_status); + goto err_out; + } + + vsi->info.sec_flags = ctxt->info.sec_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + + devm_kfree(dev, ctxt); + return 0; + +err_out: + devm_kfree(dev, ctxt); + return -EIO; +} + +/** + * ice_vsi_setup - Set up a VSI by a given type + * @pf: board private structure + * @pi: pointer to the port_info instance + * @type: VSI type + * @vf_id: defines VF id to which this VSI connects. This field is meant to be + * used only for ICE_VSI_VF VSI type. For other VSI types, should + * fill-in ICE_INVAL_VFID as input. + * + * This allocates the sw VSI structure and its queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct on + * success, NULL on failure. + */ +struct ice_vsi * +ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, + enum ice_vsi_type type, u16 vf_id) +{ + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct device *dev = &pf->pdev->dev; + struct ice_vsi *vsi; + int ret, i; + + vsi = ice_vsi_alloc(pf, type); + if (!vsi) { + dev_err(dev, "could not allocate VSI\n"); + return NULL; + } + + vsi->port_info = pi; + vsi->vsw = pf->first_sw; + if (vsi->type == ICE_VSI_VF) + vsi->vf_id = vf_id; + + if (ice_vsi_get_qs(vsi)) { + dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", + vsi->idx); + goto unroll_get_qs; + } + + /* set RSS capabilities */ + ice_vsi_set_rss_params(vsi); + + /* create the VSI */ + ret = ice_vsi_init(vsi); + if (ret) + goto unroll_get_qs; + + switch (vsi->type) { + case ICE_VSI_PF: + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto unroll_vsi_init; + + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto unroll_alloc_q_vector; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto unroll_vector_base; + + ice_vsi_map_rings_to_vectors(vsi); + + /* Do not exit if configuring RSS had an issue, at least + * receive traffic on first queue. 
Hence no need to capture + * return value + */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_vsi_cfg_rss_lut_key(vsi); + break; + case ICE_VSI_VF: + /* VF driver will take care of creating netdev for this type and + * map queues to vectors through Virtchnl, PF driver only + * creates a VSI and corresponding structures for bookkeeping + * purpose + */ + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto unroll_vsi_init; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto unroll_alloc_q_vector; + + /* Setup Vector base only during VF init phase or when VF asks + * for more vectors than assigned number. In all other cases, + * assign hw_base_vector to the value given earlier. + */ + if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) { + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto unroll_vector_base; + } else { + vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx; + } + pf->q_left_tx -= vsi->alloc_txq; + pf->q_left_rx -= vsi->alloc_rxq; + break; + default: + /* if VSI type is not recognized, clean up the resources and + * exit + */ + goto unroll_vsi_init; + } + + ice_vsi_set_tc_cfg(vsi); + + /* configure VSI nodes based on number of queues and TC's */ + for (i = 0; i < vsi->tc_cfg.numtc; i++) + max_txqs[i] = vsi->num_txq; + + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (ret) { + dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); + goto unroll_vector_base; + } + + return vsi; + +unroll_vector_base: + /* reclaim SW interrupts back to the common pool */ + ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); + pf->num_avail_sw_msix += vsi->num_q_vectors; + /* reclaim HW interrupt back to the common pool */ + ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); + pf->num_avail_hw_msix += vsi->num_q_vectors; +unroll_alloc_q_vector: + ice_vsi_free_q_vectors(vsi); +unroll_vsi_init: + ice_vsi_delete(vsi); +unroll_get_qs: + ice_vsi_put_qs(vsi); + pf->q_left_tx += vsi->alloc_txq; + pf->q_left_rx += vsi->alloc_rxq; + ice_vsi_clear(vsi); + + return NULL; +} + +/** + * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW + * @vsi: the VSI being cleaned up + */ +static void ice_vsi_release_msix(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + u16 vector = vsi->hw_base_vector; + struct ice_hw *hw = &pf->hw; + u32 txq = 0; + u32 rxq = 0; + int i, q; + + for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0); + wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0); + for (q = 0; q < q_vector->num_ring_tx; q++) { + wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); + txq++; + } + + for (q = 0; q < q_vector->num_ring_rx; q++) { + wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); + rxq++; + } + } + + ice_flush(hw); +} + +/** + * ice_vsi_free_irq - Free the IRQ association with the OS + * @vsi: the VSI being configured + */ +void ice_vsi_free_irq(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int base = vsi->sw_base_vector; + + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { + int i; + + if (!vsi->q_vectors || !vsi->irqs_ready) + return; + + ice_vsi_release_msix(vsi); + if (vsi->type == ICE_VSI_VF) + return; + + vsi->irqs_ready = false; + for (i = 0; i < vsi->num_q_vectors; i++) { + u16 vector = i + base; + int irq_num; + + irq_num = pf->msix_entries[vector].vector; + + /* free only the irqs that were actually requested */ + if (!vsi->q_vectors[i] || + !(vsi->q_vectors[i]->num_ring_tx 
|| + vsi->q_vectors[i]->num_ring_rx)) + continue; + + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(irq_num, NULL); + synchronize_irq(irq_num); + devm_free_irq(&pf->pdev->dev, irq_num, + vsi->q_vectors[i]); + } + } +} + +/** + * ice_vsi_free_tx_rings - Free Tx resources for VSI queues + * @vsi: the VSI having resources freed + */ +void ice_vsi_free_tx_rings(struct ice_vsi *vsi) +{ + int i; + + if (!vsi->tx_rings) + return; + + ice_for_each_txq(vsi, i) + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) + ice_free_tx_ring(vsi->tx_rings[i]); +} + +/** + * ice_vsi_free_rx_rings - Free Rx resources for VSI queues + * @vsi: the VSI having resources freed + */ +void ice_vsi_free_rx_rings(struct ice_vsi *vsi) +{ + int i; + + if (!vsi->rx_rings) + return; + + ice_for_each_rxq(vsi, i) + if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) + ice_free_rx_ring(vsi->rx_rings[i]); +} + +/** + * ice_vsi_close - Shut down a VSI + * @vsi: the VSI being shut down + */ +void ice_vsi_close(struct ice_vsi *vsi) +{ + if (!test_and_set_bit(__ICE_DOWN, vsi->state)) + ice_down(vsi); + + ice_vsi_free_irq(vsi); + ice_vsi_free_tx_rings(vsi); + ice_vsi_free_rx_rings(vsi); +} + +/** + * ice_free_res - free a block of resources + * @res: pointer to the resource + * @index: starting index previously returned by ice_get_res + * @id: identifier to track owner + * + * Returns number of resources freed + */ +int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) +{ + int count = 0; + int i; + + if (!res || index >= res->num_entries) + return -EINVAL; + + id |= ICE_RES_VALID_BIT; + for (i = index; i < res->num_entries && res->list[i] == id; i++) { + res->list[i] = 0; + count++; + } + + return count; +} + +/** + * ice_search_res - Search the tracker for a block of resources + * @res: pointer to the resource + * @needed: size of the block needed + * @id: identifier to track owner + * + * Returns the base item index of the block, or -ENOMEM for error + */ +static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) +{ + int start = res->search_hint; + int end = start; + + if ((start + needed) > res->num_entries) + return -ENOMEM; + + id |= ICE_RES_VALID_BIT; + + do { + /* skip already allocated entries */ + if (res->list[end++] & ICE_RES_VALID_BIT) { + start = end; + if ((start + needed) > res->num_entries) + break; + } + + if (end == (start + needed)) { + int i = start; + + /* there was enough, so assign it to the requestor */ + while (i != end) + res->list[i++] = id; + + if (end == res->num_entries) + end = 0; + + res->search_hint = end; + return start; + } + } while (1); + + return -ENOMEM; +} + +/** + * ice_get_res - get a block of resources + * @pf: board private structure + * @res: pointer to the resource + * @needed: size of the block needed + * @id: identifier to track owner + * + * Returns the base item index of the block, or -ENOMEM for error + * The search_hint trick and lack of advanced fit-finding only works + * because we're highly likely to have all the same sized requests. + * Linear search time and any fragmentation should be minimal. 
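+ *
+ * For example (illustrative numbers): with num_entries = 64 and a
+ * stream of identical 4-entry requests, each call starts scanning at
+ * search_hint, which sits just past the previous allocation, so it
+ * succeeds immediately; only once the tail of the tracker is used up
+ * does ice_get_res() reset search_hint to 0 and rescan from index 0.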
+ */ +int +ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) +{ + int ret; + + if (!res || !pf) + return -EINVAL; + + if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { + dev_err(&pf->pdev->dev, + "param err: needed=%d, num_entries = %d id=0x%04x\n", + needed, res->num_entries, id); + return -EINVAL; + } + + /* search based on search_hint */ + ret = ice_search_res(res, needed, id); + + if (ret < 0) { + /* previous search failed. Reset search hint and try again */ + res->search_hint = 0; + ret = ice_search_res(res, needed, id); + } + + return ret; +} + +/** + * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI + * @vsi: the VSI being un-configured + */ +void ice_vsi_dis_irq(struct ice_vsi *vsi) +{ + int base = vsi->sw_base_vector; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + u32 val; + int i; + + /* disable interrupt causation from each queue */ + if (vsi->tx_rings) { + ice_for_each_txq(vsi, i) { + if (vsi->tx_rings[i]) { + u16 reg; + + reg = vsi->tx_rings[i]->reg_idx; + val = rd32(hw, QINT_TQCTL(reg)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(reg), val); + } + } + } + + if (vsi->rx_rings) { + ice_for_each_rxq(vsi, i) { + if (vsi->rx_rings[i]) { + u16 reg; + + reg = vsi->rx_rings[i]->reg_idx; + val = rd32(hw, QINT_RQCTL(reg)); + val &= ~QINT_RQCTL_CAUSE_ENA_M; + wr32(hw, QINT_RQCTL(reg), val); + } + } + } + + /* disable each interrupt */ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { + for (i = vsi->hw_base_vector; + i < (vsi->num_q_vectors + vsi->hw_base_vector); i++) + wr32(hw, GLINT_DYN_CTL(i), 0); + + ice_flush(hw); + for (i = 0; i < vsi->num_q_vectors; i++) + synchronize_irq(pf->msix_entries[i + base].vector); + } +} + +/** + * ice_vsi_release - Delete a VSI and free its resources + * @vsi: the VSI being removed + * + * Returns 0 on success or < 0 on error + */ +int ice_vsi_release(struct ice_vsi *vsi) +{ + struct ice_pf *pf; + struct ice_vf *vf; + + if (!vsi->back) + return -ENODEV; + pf = vsi->back; + vf = &pf->vf[vsi->vf_id]; + /* do not unregister and free netdevs while driver is in the reset + * recovery pending state. Since reset/rebuild happens through PF + * service task workqueue, its not a good idea to unregister netdev + * that is associated to the PF that is running the work queue items + * currently. 
This is done to avoid check_flush_dependency() warning + * on this wq + */ + if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { + unregister_netdev(vsi->netdev); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } + + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_rss_clean(vsi); + + /* Disable VSI and free resources */ + ice_vsi_dis_irq(vsi); + ice_vsi_close(vsi); + + /* reclaim interrupt vectors back to PF */ + if (vsi->type != ICE_VSI_VF) { + /* reclaim SW interrupts back to the common pool */ + ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, + vsi->idx); + pf->num_avail_sw_msix += vsi->num_q_vectors; + /* reclaim HW interrupts back to the common pool */ + ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, + vsi->idx); + pf->num_avail_hw_msix += vsi->num_q_vectors; + } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) { + /* Reclaim VF resources back only while freeing all VFs or + * vector reassignment is requested + */ + ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx, + vsi->idx); + pf->num_avail_hw_msix += pf->num_vf_msix; + } + + ice_remove_vsi_fltr(&pf->hw, vsi->idx); + ice_vsi_delete(vsi); + ice_vsi_free_q_vectors(vsi); + ice_vsi_clear_rings(vsi); + + ice_vsi_put_qs(vsi); + pf->q_left_tx += vsi->alloc_txq; + pf->q_left_rx += vsi->alloc_rxq; + + /* retain SW VSI data structure since it is needed to unregister and + * free VSI netdev when PF is not in reset recovery pending state,\ + * for ex: during rmmod. + */ + if (!ice_is_reset_in_progress(pf->state)) + ice_vsi_clear(vsi); + + return 0; +} + +/** + * ice_vsi_rebuild - Rebuild VSI after reset + * @vsi: VSI to be rebuild + * + * Returns 0 on success and negative value on failure + */ +int ice_vsi_rebuild(struct ice_vsi *vsi) +{ + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + int ret, i; + + if (!vsi) + return -EINVAL; + + ice_vsi_free_q_vectors(vsi); + ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); + ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); + vsi->sw_base_vector = 0; + vsi->hw_base_vector = 0; + ice_vsi_clear_rings(vsi); + ice_vsi_free_arrays(vsi, false); + ice_vsi_set_num_qs(vsi); + + /* Initialize VSI struct elements and create VSI in FW */ + ret = ice_vsi_init(vsi); + if (ret < 0) + goto err_vsi; + + ret = ice_vsi_alloc_arrays(vsi, false); + if (ret < 0) + goto err_vsi; + + switch (vsi->type) { + case ICE_VSI_PF: + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto err_rings; + + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto err_vectors; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto err_vectors; + + ice_vsi_map_rings_to_vectors(vsi); + break; + case ICE_VSI_VF: + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto err_rings; + + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto err_vectors; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto err_vectors; + + vsi->back->q_left_tx -= vsi->alloc_txq; + vsi->back->q_left_rx -= vsi->alloc_rxq; + break; + default: + break; + } + + ice_vsi_set_tc_cfg(vsi); + + /* configure VSI nodes based on number of queues and TC's */ + for (i = 0; i < vsi->tc_cfg.numtc; i++) + max_txqs[i] = vsi->num_txq; + + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Failed VSI lan queue config\n"); + goto err_vectors; + } + return 0; + +err_vectors: + ice_vsi_free_q_vectors(vsi); +err_rings: + if (vsi->netdev) { + vsi->current_netdev_flags = 0; + unregister_netdev(vsi->netdev); + 
free_netdev(vsi->netdev); + vsi->netdev = NULL; + } +err_vsi: + ice_vsi_clear(vsi); + set_bit(__ICE_RESET_FAILED, vsi->back->state); + return ret; +} + +/** + * ice_is_reset_in_progress - check for a reset in progress + * @state: pf state field + */ +bool ice_is_reset_in_progress(unsigned long *state) +{ + return test_bit(__ICE_RESET_OICR_RECV, state) || + test_bit(__ICE_PFR_REQ, state) || + test_bit(__ICE_CORER_REQ, state) || + test_bit(__ICE_GLOBR_REQ, state); +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h new file mode 100644 index 000000000000..677db40338f5 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_LIB_H_ +#define _ICE_LIB_H_ + +#include "ice.h" + +int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, + const u8 *macaddr); + +void ice_free_fltr_list(struct device *dev, struct list_head *h); + +void ice_update_eth_stats(struct ice_vsi *vsi); + +int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); + +int ice_vsi_cfg_txqs(struct ice_vsi *vsi); + +void ice_vsi_cfg_msix(struct ice_vsi *vsi); + +int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); + +int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid); + +int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi); + +int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena); + +int ice_vsi_start_rx_rings(struct ice_vsi *vsi); + +int ice_vsi_stop_rx_rings(struct ice_vsi *vsi); + +int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num); + +int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena); + +void ice_vsi_delete(struct ice_vsi *vsi); + +int ice_vsi_clear(struct ice_vsi *vsi); + +struct ice_vsi * +ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, + enum ice_vsi_type type, u16 vf_id); + +int ice_vsi_release(struct ice_vsi *vsi); + +void ice_vsi_close(struct ice_vsi *vsi); + +int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); + +int +ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id); + +int ice_vsi_rebuild(struct ice_vsi *vsi); + +bool ice_is_reset_in_progress(unsigned long *state); + +void ice_vsi_free_q_vectors(struct ice_vsi *vsi); + +void ice_vsi_put_qs(struct ice_vsi *vsi); + +void ice_vsi_dis_irq(struct ice_vsi *vsi); + +void ice_vsi_free_irq(struct ice_vsi *vsi); + +void ice_vsi_free_rx_rings(struct ice_vsi *vsi); + +void ice_vsi_free_tx_rings(struct ice_vsi *vsi); + +int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); + +int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); + +irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data); +#endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 3f047bb43348..8f61b375e768 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -6,8 +6,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "ice.h" +#include "ice_lib.h" -#define DRV_VERSION "ice-0.7.0-k" +#define DRV_VERSION "0.7.2-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@ -15,7 +16,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); 
-MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); static int debug = -1; @@ -31,173 +32,84 @@ static const struct net_device_ops ice_netdev_ops; static void ice_pf_dis_all_vsi(struct ice_pf *pf); static void ice_rebuild(struct ice_pf *pf); -static int ice_vsi_release(struct ice_vsi *vsi); + +static void ice_vsi_release_all(struct ice_pf *pf); static void ice_update_vsi_stats(struct ice_vsi *vsi); static void ice_update_pf_stats(struct ice_pf *pf); /** - * ice_get_free_slot - get the next non-NULL location index in array - * @array: array to search - * @size: size of the array - * @curr: last known occupied index to be used as a search hint - * - * void * is being used to keep the functionality generic. This lets us use this - * function on any array of pointers. + * ice_get_tx_pending - returns number of Tx descriptors not processed + * @ring: the ring of descriptors */ -static int ice_get_free_slot(void *array, int size, int curr) +static u32 ice_get_tx_pending(struct ice_ring *ring) { - int **tmp_array = (int **)array; - int next; + u32 head, tail; - if (curr < (size - 1) && !tmp_array[curr + 1]) { - next = curr + 1; - } else { - int i = 0; + head = ring->next_to_clean; + tail = readl(ring->tail); - while ((i < size) && (tmp_array[i])) - i++; - if (i == size) - next = ICE_NO_VSI; - else - next = i; - } - return next; + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + return 0; } /** - * ice_search_res - Search the tracker for a block of resources - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner - * Returns the base item index of the block, or -ENOMEM for error + * ice_check_for_hang_subtask - check for and recover hung queues + * @pf: pointer to PF struct */ -static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) +static void ice_check_for_hang_subtask(struct ice_pf *pf) { - int start = res->search_hint; - int end = start; - - id |= ICE_RES_VALID_BIT; + struct ice_vsi *vsi = NULL; + unsigned int i; + u32 v, v_idx; + int packets; - do { - /* skip already allocated entries */ - if (res->list[end++] & ICE_RES_VALID_BIT) { - start = end; - if ((start + needed) > res->num_entries) - break; + ice_for_each_vsi(pf, v) + if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { + vsi = pf->vsi[v]; + break; } - if (end == (start + needed)) { - int i = start; + if (!vsi || test_bit(__ICE_DOWN, vsi->state)) + return; - /* there was enough, so assign it to the requestor */ - while (i != end) - res->list[i++] = id; + if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) + return; - if (end == res->num_entries) - end = 0; + for (i = 0; i < vsi->num_txq; i++) { + struct ice_ring *tx_ring = vsi->tx_rings[i]; + + if (tx_ring && tx_ring->desc) { + int itr = ICE_ITR_NONE; + + /* If packet counter has not changed the queue is + * likely stalled, so force an interrupt for this + * queue. + * + * prev_pkt would be negative if there was no + * pending work. 
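+ *
+ * Put differently: prev_pkt is armed with the current packet count
+ * only when ice_get_tx_pending() reports descriptors still pending;
+ * if the next pass then sees an unchanged count, the queue's vector
+ * is kicked via GLINT_DYN_CTL with SWINT_TRIG set so its NAPI
+ * handler runs and cleans the ring.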
+ */ + packets = tx_ring->stats.pkts & INT_MAX; + if (tx_ring->tx_stats.prev_pkt == packets) { + /* Trigger sw interrupt to revive the queue */ + v_idx = tx_ring->q_vector->v_idx; + wr32(&vsi->back->hw, + GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), + (itr << GLINT_DYN_CTL_ITR_INDX_S) | + GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_INTENA_MSK_M); + continue; + } - res->search_hint = end; - return start; + /* Memory barrier between read of packet count and call + * to ice_get_tx_pending() + */ + smp_rmb(); + tx_ring->tx_stats.prev_pkt = + ice_get_tx_pending(tx_ring) ? packets : -1; } - } while (1); - - return -ENOMEM; -} - -/** - * ice_get_res - get a block of resources - * @pf: board private structure - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner - * - * Returns the base item index of the block, or -ENOMEM for error - * The search_hint trick and lack of advanced fit-finding only works - * because we're highly likely to have all the same sized requests. - * Linear search time and any fragmentation should be minimal. - */ -static int -ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) -{ - int ret; - - if (!res || !pf) - return -EINVAL; - - if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { - dev_err(&pf->pdev->dev, - "param err: needed=%d, num_entries = %d id=0x%04x\n", - needed, res->num_entries, id); - return -EINVAL; - } - - /* search based on search_hint */ - ret = ice_search_res(res, needed, id); - - if (ret < 0) { - /* previous search failed. Reset search hint and try again */ - res->search_hint = 0; - ret = ice_search_res(res, needed, id); } - - return ret; -} - -/** - * ice_free_res - free a block of resources - * @res: pointer to the resource - * @index: starting index previously returned by ice_get_res - * @id: identifier to track owner - * Returns number of resources freed - */ -static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) -{ - int count = 0; - int i; - - if (!res || index >= res->num_entries) - return -EINVAL; - - id |= ICE_RES_VALID_BIT; - for (i = index; i < res->num_entries && res->list[i] == id; i++) { - res->list[i] = 0; - count++; - } - - return count; -} - -/** - * ice_add_mac_to_list - Add a mac address filter entry to the list - * @vsi: the VSI to be forwarded to - * @add_list: pointer to the list which contains MAC filter entries - * @macaddr: the MAC address to be added. - * - * Adds mac address filter entry to the temp list - * - * Returns 0 on success or ENOMEM on failure. 
- */ -static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, - const u8 *macaddr) -{ - struct ice_fltr_list_entry *tmp; - struct ice_pf *pf = vsi->back; - - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); - if (!tmp) - return -ENOMEM; - - tmp->fltr_info.flag = ICE_FLTR_TX; - tmp->fltr_info.src = vsi->vsi_num; - tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num; - ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); - - INIT_LIST_HEAD(&tmp->list_entry); - list_add(&tmp->list_entry, add_list); - - return 0; } /** @@ -243,24 +155,6 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) } /** - * ice_free_fltr_list - free filter lists helper - * @dev: pointer to the device struct - * @h: pointer to the list head to be freed - * - * Helper function to free filter lists previously created using - * ice_add_mac_to_list - */ -static void ice_free_fltr_list(struct device *dev, struct list_head *h) -{ - struct ice_fltr_list_entry *e, *tmp; - - list_for_each_entry_safe(e, tmp, h, list_entry) { - list_del(&e->list_entry); - devm_kfree(dev, e); - } -} - -/** * ice_vsi_fltr_changed - check if filter state changed * @vsi: VSI to be checked * @@ -359,7 +253,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); if (vsi->current_netdev_flags & IFF_PROMISC) { /* Apply TX filter rule to get traffic from VMs */ - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true, + status = ice_cfg_dflt_vsi(hw, vsi->idx, true, ICE_FLTR_TX); if (status) { netdev_err(netdev, "Error setting default VSI %i tx rule\n", @@ -369,7 +263,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) goto out_promisc; } /* Apply RX filter rule to get traffic from wire */ - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true, + status = ice_cfg_dflt_vsi(hw, vsi->idx, true, ICE_FLTR_RX); if (status) { netdev_err(netdev, "Error setting default VSI %i rx rule\n", @@ -380,7 +274,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } } else { /* Clear TX filter rule to stop traffic from VMs */ - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false, + status = ice_cfg_dflt_vsi(hw, vsi->idx, false, ICE_FLTR_TX); if (status) { netdev_err(netdev, "Error clearing default VSI %i tx rule\n", @@ -389,8 +283,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) err = -EIO; goto out_promisc; } - /* Clear filter RX to remove traffic from wire */ - status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false, + /* Clear RX filter to remove traffic from wire */ + status = ice_cfg_dflt_vsi(hw, vsi->idx, false, ICE_FLTR_RX); if (status) { netdev_err(netdev, "Error clearing default VSI %i rx rule\n", @@ -438,15 +332,6 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf) } /** - * ice_is_reset_recovery_pending - schedule a reset - * @state: pf state field - */ -static bool ice_is_reset_recovery_pending(unsigned long int *state) -{ - return test_bit(__ICE_RESET_RECOVERY_PENDING, state); -} - -/** * ice_prepare_for_reset - prep for the core to reset * @pf: board private structure * @@ -456,23 +341,17 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - u32 v; - - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num); - dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); + /* Notify VFs of impending reset */ + if (ice_check_sq_alive(hw, &hw->mailboxq)) + ice_vc_notify_reset(pf); /* 
disable the VSIs and their queues that are not already DOWN */ - /* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */ ice_pf_dis_all_vsi(pf); - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - pf->vsi[v]->vsi_num = 0; - ice_shutdown_all_ctrlq(hw); + + set_bit(__ICE_PREPARED_FOR_RESET, pf->state); } /** @@ -489,27 +368,29 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); WARN_ON(in_interrupt()); - /* PFR is a bit of a special case because it doesn't result in an OICR - * interrupt. So for PFR, we prepare for reset, issue the reset and - * rebuild sequentially. - */ - if (reset_type == ICE_RESET_PFR) { - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - ice_prepare_for_reset(pf); - } + ice_prepare_for_reset(pf); /* trigger the reset */ if (ice_reset(hw, reset_type)) { dev_err(dev, "reset %d failed\n", reset_type); set_bit(__ICE_RESET_FAILED, pf->state); - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_RESET_OICR_RECV, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(__ICE_PFR_REQ, pf->state); + clear_bit(__ICE_CORER_REQ, pf->state); + clear_bit(__ICE_GLOBR_REQ, pf->state); return; } + /* PFR is a bit of a special case because it doesn't result in an OICR + * interrupt. So for PFR, rebuild after the reset and clear the reset- + * associated state bits. + */ if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; ice_rebuild(pf); - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(__ICE_PFR_REQ, pf->state); } } @@ -519,48 +400,60 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) */ static void ice_reset_subtask(struct ice_pf *pf) { - enum ice_reset_req reset_type; - - rtnl_lock(); + enum ice_reset_req reset_type = ICE_RESET_INVAL; /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an - * OICR interrupt. The OICR handler (ice_misc_intr) determines what - * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in - * pf->state. So if reset/recovery is pending (as indicated by this bit) - * we do a rebuild and return. + * OICR interrupt. The OICR handler (ice_misc_intr) determines what type + * of reset is pending and sets bits in pf->state indicating the reset + * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set + * prepare for pending reset if not already (for PF software-initiated + * global resets the software should already be prepared for it as + * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated + * by firmware or software on other PFs, that bit is not set so prepare + * for the reset now), poll for reset done, rebuild and return. */ - if (ice_is_reset_recovery_pending(pf->state)) { + if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) { clear_bit(__ICE_GLOBR_RECV, pf->state); clear_bit(__ICE_CORER_RECV, pf->state); - ice_prepare_for_reset(pf); + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) + ice_prepare_for_reset(pf); /* make sure we are ready to rebuild */ - if (ice_check_reset(&pf->hw)) + if (ice_check_reset(&pf->hw)) { set_bit(__ICE_RESET_FAILED, pf->state); - else + } else { + /* done with reset. 
start rebuild */ + pf->hw.reset_ongoing = false; ice_rebuild(pf); - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - goto unlock; + /* clear bit to resume normal operations, but + * ICE_NEEDS_RESTART bit is set incase rebuild failed + */ + clear_bit(__ICE_RESET_OICR_RECV, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); + clear_bit(__ICE_PFR_REQ, pf->state); + clear_bit(__ICE_CORER_REQ, pf->state); + clear_bit(__ICE_GLOBR_REQ, pf->state); + } + + return; } /* No pending resets to finish processing. Check for new resets */ - if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state)) - reset_type = ICE_RESET_GLOBR; - else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) - reset_type = ICE_RESET_CORER; - else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) + if (test_bit(__ICE_PFR_REQ, pf->state)) reset_type = ICE_RESET_PFR; - else - goto unlock; + if (test_bit(__ICE_CORER_REQ, pf->state)) + reset_type = ICE_RESET_CORER; + if (test_bit(__ICE_GLOBR_REQ, pf->state)) + reset_type = ICE_RESET_GLOBR; + /* If no valid reset type requested just return */ + if (reset_type == ICE_RESET_INVAL) + return; - /* reset if not already down or resetting */ + /* reset if not already down or busy */ if (!test_bit(__ICE_DOWN, pf->state) && !test_bit(__ICE_CFG_BUSY, pf->state)) { ice_do_reset(pf, reset_type); } - -unlock: - rtnl_unlock(); } /** @@ -772,6 +665,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi) } } + ice_vc_notify_link_state(pf); + return 0; } @@ -822,6 +717,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) cq = &hw->adminq; qtype = "Admin"; break; + case ICE_CTL_Q_MAILBOX: + cq = &hw->mailboxq; + qtype = "Mailbox"; + break; default: dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", q_type); @@ -903,6 +802,12 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) dev_err(&pf->pdev->dev, "Could not handle link event\n"); break; + case ice_mbx_opc_send_msg_to_pf: + ice_vc_process_vf_msg(pf, &event); + break; + case ice_aqc_opc_fw_logging: + ice_output_fw_log(hw, &event.desc, event.msg_buf); + break; default: dev_dbg(&pf->pdev->dev, "%s Receive Queue unknown event 0x%04x ignored\n", @@ -959,6 +864,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) } /** + * ice_clean_mailboxq_subtask - clean the MailboxQ rings + * @pf: board private structure + */ +static void ice_clean_mailboxq_subtask(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + + if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state)) + return; + + if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) + return; + + clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); + + if (ice_ctrlq_pending(hw, &hw->mailboxq)) + __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); + + ice_flush(hw); +} + +/** * ice_service_task_schedule - schedule the service task to wake up * @pf: board private structure * @@ -966,8 +893,9 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) */ static void ice_service_task_schedule(struct ice_pf *pf) { - if (!test_bit(__ICE_DOWN, pf->state) && - !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state)) + if (!test_bit(__ICE_SERVICE_DIS, pf->state) && + !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && + !test_bit(__ICE_NEEDS_RESTART, pf->state)) queue_work(ice_wq, &pf->serv_task); } @@ -985,6 +913,22 @@ static void ice_service_task_complete(struct ice_pf *pf) } /** + * ice_service_task_stop - stop service task and cancel works + * @pf: board private structure + */ +static void ice_service_task_stop(struct ice_pf *pf) +{ + 
set_bit(__ICE_SERVICE_DIS, pf->state); + + if (pf->serv_tmr.function) + del_timer_sync(&pf->serv_tmr); + if (pf->serv_task.func) + cancel_work_sync(&pf->serv_task); + + clear_bit(__ICE_SERVICE_SCHED, pf->state); +} + +/** * ice_service_timer - timer callback to schedule service task * @t: pointer to timer_list */ @@ -997,6 +941,160 @@ static void ice_service_timer(struct timer_list *t) } /** + * ice_handle_mdd_event - handle malicious driver detect event + * @pf: pointer to the PF structure + * + * Called from service task. OICR interrupt handler indicates MDD event + */ +static void ice_handle_mdd_event(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + bool mdd_detected = false; + u32 reg; + int i; + + if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state)) + return; + + /* find what triggered the MDD event */ + reg = rd32(hw, GL_MDET_TX_PQM); + if (reg & GL_MDET_TX_PQM_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> + GL_MDET_TX_PQM_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> + GL_MDET_TX_PQM_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> + GL_MDET_TX_PQM_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> + GL_MDET_TX_PQM_QNUM_S); + + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_PQM, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_TX_TCLAN); + if (reg & GL_MDET_TX_TCLAN_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> + GL_MDET_TX_TCLAN_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> + GL_MDET_TX_TCLAN_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> + GL_MDET_TX_TCLAN_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> + GL_MDET_TX_TCLAN_QNUM_S); + + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_RX); + if (reg & GL_MDET_RX_VALID_M) { + u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> + GL_MDET_RX_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> + GL_MDET_RX_VF_NUM_S; + u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> + GL_MDET_RX_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> + GL_MDET_RX_QNUM_S); + + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_RX, 0xffffffff); + mdd_detected = true; + } + + if (mdd_detected) { + bool pf_mdd_detected = false; + + reg = rd32(hw, PF_MDET_TX_PQM); + if (reg & PF_MDET_TX_PQM_VALID_M) { + wr32(hw, PF_MDET_TX_PQM, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_TX_TCLAN); + if (reg & PF_MDET_TX_TCLAN_VALID_M) { + wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_RX); + if (reg & PF_MDET_RX_VALID_M) { + wr32(hw, PF_MDET_RX, 0xFFFF); + dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + /* Queue belongs to the PF initiate a reset */ + if (pf_mdd_detected) { + set_bit(__ICE_NEEDS_RESTART, pf->state); + ice_service_task_schedule(pf); + } + } + + /* see if one of the VFs needs to be reset */ + for (i = 0; i < 
pf->num_alloc_vfs && mdd_detected; i++) { + struct ice_vf *vf = &pf->vf[i]; + + reg = rd32(hw, VP_MDET_TX_PQM(i)); + if (reg & VP_MDET_TX_PQM_VALID_M) { + wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, VP_MDET_TX_TCLAN(i)); + if (reg & VP_MDET_TX_TCLAN_VALID_M) { + wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, VP_MDET_TX_TDPU(i)); + if (reg & VP_MDET_TX_TDPU_VALID_M) { + wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, VP_MDET_RX(i)); + if (reg & VP_MDET_RX_VALID_M) { + wr32(hw, VP_MDET_RX(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", + i); + } + + if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) { + dev_info(&pf->pdev->dev, + "Too many MDD events on VF %d, disabled\n", i); + dev_info(&pf->pdev->dev, + "Use PF Control I/F to re-enable the VF\n"); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + } + } + + /* re-enable MDD interrupt cause */ + clear_bit(__ICE_MDD_EVENT_PENDING, pf->state); + reg = rd32(hw, PFINT_OICR_ENA); + reg |= PFINT_OICR_MAL_DETECT_M; + wr32(hw, PFINT_OICR_ENA, reg); + ice_flush(hw); +} + +/** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct */ @@ -1010,16 +1108,21 @@ static void ice_service_task(struct work_struct *work) /* process reset requests first */ ice_reset_subtask(pf); - /* bail if a reset/recovery cycle is pending */ - if (ice_is_reset_recovery_pending(pf->state) || - test_bit(__ICE_SUSPENDED, pf->state)) { + /* bail if a reset/recovery cycle is pending or rebuild failed */ + if (ice_is_reset_in_progress(pf->state) || + test_bit(__ICE_SUSPENDED, pf->state) || + test_bit(__ICE_NEEDS_RESTART, pf->state)) { ice_service_task_complete(pf); return; } + ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); + ice_handle_mdd_event(pf); + ice_process_vflr_event(pf); ice_watchdog_subtask(pf); ice_clean_adminq_subtask(pf); + ice_clean_mailboxq_subtask(pf); /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ ice_service_task_complete(pf); @@ -1029,6 +1132,9 @@ static void ice_service_task(struct work_struct *work) * schedule the service task now. 
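+ * (An MDD, VFLR, or mailbox event flagged while the subtasks were
+ * running likewise re-arms the timer immediately; see the added
+ * test_bit() checks below.)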
*/ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || + test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || + test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || + test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@ -1043,6 +1149,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; + hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; + hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; + hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; + hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; } /** @@ -1073,57 +1183,6 @@ static void ice_irq_affinity_notify(struct irq_affinity_notify *notify, static void ice_irq_affinity_release(struct kref __always_unused *ref) {} /** - * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI - * @vsi: the VSI being un-configured - */ -static void ice_vsi_dis_irq(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - int base = vsi->base_vector; - u32 val; - int i; - - /* disable interrupt causation from each queue */ - if (vsi->tx_rings) { - ice_for_each_txq(vsi, i) { - if (vsi->tx_rings[i]) { - u16 reg; - - reg = vsi->tx_rings[i]->reg_idx; - val = rd32(hw, QINT_TQCTL(reg)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(reg), val); - } - } - } - - if (vsi->rx_rings) { - ice_for_each_rxq(vsi, i) { - if (vsi->rx_rings[i]) { - u16 reg; - - reg = vsi->rx_rings[i]->reg_idx; - val = rd32(hw, QINT_RQCTL(reg)); - val &= ~QINT_RQCTL_CAUSE_ENA_M; - wr32(hw, QINT_RQCTL(reg), val); - } - } - } - - /* disable each interrupt */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - for (i = vsi->base_vector; - i < (vsi->num_q_vectors + vsi->base_vector); i++) - wr32(hw, GLINT_DYN_CTL(i), 0); - - ice_flush(hw); - for (i = 0; i < vsi->num_q_vectors; i++) - synchronize_irq(pf->msix_entries[i + base].vector); - } -} - -/** * ice_vsi_ena_irq - Enable IRQ for the given VSI * @vsi: the VSI being configured */ @@ -1144,26 +1203,6 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi) } /** - * ice_vsi_delete - delete a VSI from the switch - * @vsi: pointer to VSI being removed - */ -static void ice_vsi_delete(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - struct ice_vsi_ctx ctxt; - enum ice_status status; - - ctxt.vsi_num = vsi->vsi_num; - - memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); - - status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); - if (status) - dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", - vsi->vsi_num); -} - -/** * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI * @vsi: the VSI being configured * @basename: name for the vector @@ -1172,7 +1211,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) { int q_vectors = vsi->num_q_vectors; struct ice_pf *pf = vsi->back; - int base = vsi->base_vector; + int base = vsi->sw_base_vector; int rx_int_idx = 0; int tx_int_idx = 0; int vector, err; @@ -1231,467 +1270,6 @@ free_q_irqs: } /** - * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type - * @vsi: the VSI being configured - */ -static void ice_vsi_set_rss_params(struct ice_vsi *vsi) -{ - struct ice_hw_common_caps *cap; - struct ice_pf *pf = vsi->back; - - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { - vsi->rss_size = 1; - return; - } - - cap = &pf->hw.func_caps.common_cap; - switch (vsi->type) { - case ICE_VSI_PF: - /* PF VSI 
will inherit RSS instance of PF */ - vsi->rss_table_size = cap->rss_table_size; - vsi->rss_size = min_t(int, num_online_cpus(), - BIT(cap->rss_table_entry_width)); - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; - break; - default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); - break; - } -} - -/** - * ice_vsi_setup_q_map - Setup a VSI queue map - * @vsi: the VSI being configured - * @ctxt: VSI context structure - */ -static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) -{ - u16 offset = 0, qmap = 0, numq_tc; - u16 pow = 0, max_rss = 0, qcount; - u16 qcount_tx = vsi->alloc_txq; - u16 qcount_rx = vsi->alloc_rxq; - bool ena_tc0 = false; - int i; - - /* at least TC0 should be enabled by default */ - if (vsi->tc_cfg.numtc) { - if (!(vsi->tc_cfg.ena_tc & BIT(0))) - ena_tc0 = true; - } else { - ena_tc0 = true; - } - - if (ena_tc0) { - vsi->tc_cfg.numtc++; - vsi->tc_cfg.ena_tc |= 1; - } - - numq_tc = qcount_rx / vsi->tc_cfg.numtc; - - /* TC mapping is a function of the number of Rx queues assigned to the - * VSI for each traffic class and the offset of these queues. - * The first 10 bits are for queue offset for TC0, next 4 bits for no:of - * queues allocated to TC0. No:of queues is a power-of-2. - * - * If TC is not enabled, the queue offset is set to 0, and allocate one - * queue, this way, traffic for the given TC will be sent to the default - * queue. - * - * Setup number and offset of Rx queues for all TCs for the VSI - */ - - /* qcount will change if RSS is enabled */ - if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { - if (vsi->type == ICE_VSI_PF) - max_rss = ICE_MAX_LG_RSS_QS; - else - max_rss = ICE_MAX_SMALL_RSS_QS; - - qcount = min_t(int, numq_tc, max_rss); - qcount = min_t(int, qcount, vsi->rss_size); - } else { - qcount = numq_tc; - } - - /* find the (rounded up) power-of-2 of qcount */ - pow = order_base_2(qcount); - - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { - if (!(vsi->tc_cfg.ena_tc & BIT(i))) { - /* TC is not enabled */ - vsi->tc_cfg.tc_info[i].qoffset = 0; - vsi->tc_cfg.tc_info[i].qcount = 1; - ctxt->info.tc_mapping[i] = 0; - continue; - } - - /* TC is enabled */ - vsi->tc_cfg.tc_info[i].qoffset = offset; - vsi->tc_cfg.tc_info[i].qcount = qcount; - - qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & - ICE_AQ_VSI_TC_Q_OFFSET_M) | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & - ICE_AQ_VSI_TC_Q_NUM_M); - offset += qcount; - ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); - } - - vsi->num_txq = qcount_tx; - vsi->num_rxq = offset; - - /* Rx queue mapping */ - ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); - /* q_mapping buffer holds the info for the first queue allocated for - * this VSI in the PF space and also the number of queues associated - * with this VSI. - */ - ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); - ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); -} - -/** - * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI - * @ctxt: the VSI context being set - * - * This initializes a default VSI context for all sections except the Queues. 
- */ -static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) -{ - u32 table = 0; - - memset(&ctxt->info, 0, sizeof(ctxt->info)); - /* VSI's should be allocated from shared pool */ - ctxt->alloc_from_pool = true; - /* Src pruning enabled by default */ - ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; - /* Traffic from VSI can be sent to LAN */ - ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; - - /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy - * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all - * packets untagged/tagged. - */ - ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & - ICE_AQ_VSI_VLAN_MODE_M) >> - ICE_AQ_VSI_VLAN_MODE_S); - - /* Have 1:1 UP mapping for both ingress/egress tables */ - table |= ICE_UP_TABLE_TRANSLATE(0, 0); - table |= ICE_UP_TABLE_TRANSLATE(1, 1); - table |= ICE_UP_TABLE_TRANSLATE(2, 2); - table |= ICE_UP_TABLE_TRANSLATE(3, 3); - table |= ICE_UP_TABLE_TRANSLATE(4, 4); - table |= ICE_UP_TABLE_TRANSLATE(5, 5); - table |= ICE_UP_TABLE_TRANSLATE(6, 6); - table |= ICE_UP_TABLE_TRANSLATE(7, 7); - ctxt->info.ingress_table = cpu_to_le32(table); - ctxt->info.egress_table = cpu_to_le32(table); - /* Have 1:1 UP mapping for outer to inner UP table */ - ctxt->info.outer_up_table = cpu_to_le32(table); - /* No Outer tag support outer_tag_flags remains to zero */ -} - -/** - * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI - * @ctxt: the VSI context being set - * @vsi: the VSI being configured - */ -static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) -{ - u8 lut_type, hash_type; - - switch (vsi->type) { - case ICE_VSI_PF: - /* PF VSI will inherit RSS instance of PF */ - lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; - break; - default: - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", - vsi->type); - return; - } - - ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | - ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & - ICE_AQ_VSI_Q_OPT_RSS_HASH_M); -} - -/** - * ice_vsi_add - Create a new VSI or fetch preallocated VSI - * @vsi: the VSI being configured - * - * This initializes a VSI context depending on the VSI type to be added and - * passes it down to the add_vsi aq command to create a new VSI. 
- */ -static int ice_vsi_add(struct ice_vsi *vsi) -{ - struct ice_vsi_ctx ctxt = { 0 }; - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - int ret = 0; - - switch (vsi->type) { - case ICE_VSI_PF: - ctxt.flags = ICE_AQ_VSI_TYPE_PF; - break; - default: - return -ENODEV; - } - - ice_set_dflt_vsi_ctx(&ctxt); - /* if the switch is in VEB mode, allow VSI loopback */ - if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) - ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; - - /* Set LUT type and HASH type if RSS is enabled */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_set_rss_vsi_ctx(&ctxt, vsi); - - ctxt.info.sw_id = vsi->port_info->sw_id; - ice_vsi_setup_q_map(vsi, &ctxt); - - ret = ice_aq_add_vsi(hw, &ctxt, NULL); - if (ret) { - dev_err(&vsi->back->pdev->dev, - "Add VSI AQ call failed, err %d\n", ret); - return -EIO; - } - vsi->info = ctxt.info; - vsi->vsi_num = ctxt.vsi_num; - - return ret; -} - -/** - * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW - * @vsi: the VSI being cleaned up - */ -static void ice_vsi_release_msix(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - u16 vector = vsi->base_vector; - struct ice_hw *hw = &pf->hw; - u32 txq = 0; - u32 rxq = 0; - int i, q; - - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); - for (q = 0; q < q_vector->num_ring_tx; q++) { - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); - txq++; - } - - for (q = 0; q < q_vector->num_ring_rx; q++) { - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); - rxq++; - } - } - - ice_flush(hw); -} - -/** - * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI - * @vsi: the VSI having rings deallocated - */ -static void ice_vsi_clear_rings(struct ice_vsi *vsi) -{ - int i; - - if (vsi->tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) { - if (vsi->tx_rings[i]) { - kfree_rcu(vsi->tx_rings[i], rcu); - vsi->tx_rings[i] = NULL; - } - } - } - if (vsi->rx_rings) { - for (i = 0; i < vsi->alloc_rxq; i++) { - if (vsi->rx_rings[i]) { - kfree_rcu(vsi->rx_rings[i], rcu); - vsi->rx_rings[i] = NULL; - } - } - } -} - -/** - * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI - * @vsi: VSI which is having rings allocated - */ -static int ice_vsi_alloc_rings(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int i; - - /* Allocate tx_rings */ - for (i = 0; i < vsi->alloc_txq; i++) { - struct ice_ring *ring; - - /* allocate with kzalloc(), free with kfree_rcu() */ - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - - if (!ring) - goto err_out; - - ring->q_index = i; - ring->reg_idx = vsi->txq_map[i]; - ring->ring_active = false; - ring->vsi = vsi; - ring->netdev = vsi->netdev; - ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; - - vsi->tx_rings[i] = ring; - } - - /* Allocate rx_rings */ - for (i = 0; i < vsi->alloc_rxq; i++) { - struct ice_ring *ring; - - /* allocate with kzalloc(), free with kfree_rcu() */ - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - goto err_out; - - ring->q_index = i; - ring->reg_idx = vsi->rxq_map[i]; - ring->ring_active = false; - ring->vsi = vsi; - ring->netdev = vsi->netdev; - ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; - vsi->rx_rings[i] = ring; - } - - return 0; - -err_out: - ice_vsi_clear_rings(vsi); - return -ENOMEM; -} - -/** - * ice_vsi_free_irq - Free the irq association with the OS - * @vsi: the VSI being configured - */ -static void ice_vsi_free_irq(struct 
ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int base = vsi->base_vector; - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; - - if (!vsi->q_vectors || !vsi->irqs_ready) - return; - - vsi->irqs_ready = false; - for (i = 0; i < vsi->num_q_vectors; i++) { - u16 vector = i + base; - int irq_num; - - irq_num = pf->msix_entries[vector].vector; - - /* free only the irqs that were actually requested */ - if (!vsi->q_vectors[i] || - !(vsi->q_vectors[i]->num_ring_tx || - vsi->q_vectors[i]->num_ring_rx)) - continue; - - /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); - - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); - synchronize_irq(irq_num); - devm_free_irq(&pf->pdev->dev, irq_num, - vsi->q_vectors[i]); - } - ice_vsi_release_msix(vsi); - } -} - -/** - * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW - * @vsi: the VSI being configured - */ -static void ice_vsi_cfg_msix(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - u16 vector = vsi->base_vector; - struct ice_hw *hw = &pf->hw; - u32 txq = 0, rxq = 0; - int i, q, itr; - u8 itr_gran; - - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - - itr_gran = hw->itr_gran_200; - - if (q_vector->num_ring_rx) { - q_vector->rx.itr = - ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting, - itr_gran); - q_vector->rx.latency_range = ICE_LOW_LATENCY; - } - - if (q_vector->num_ring_tx) { - q_vector->tx.itr = - ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting, - itr_gran); - q_vector->tx.latency_range = ICE_LOW_LATENCY; - } - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr); - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr); - - /* Both Transmit Queue Interrupt Cause Control register - * and Receive Queue Interrupt Cause control register - * expects MSIX_INDX field to be the vector index - * within the function space and not the absolute - * vector index across PF or across device. - * For SR-IOV VF VSIs queue vector index always starts - * with 1 since first vector index(0) is used for OICR - * in VF space. Since VMDq and other PF VSIs are withtin - * the PF function space, use the vector index thats - * tracked for this PF. 
- */ - for (q = 0; q < q_vector->num_ring_tx; q++) { - u32 val; - - itr = ICE_TX_ITR; - val = QINT_TQCTL_CAUSE_ENA_M | - (itr << QINT_TQCTL_ITR_INDX_S) | - (vector << QINT_TQCTL_MSIX_INDX_S); - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); - txq++; - } - - for (q = 0; q < q_vector->num_ring_rx; q++) { - u32 val; - - itr = ICE_RX_ITR; - val = QINT_RQCTL_CAUSE_ENA_M | - (itr << QINT_RQCTL_ITR_INDX_S) | - (vector << QINT_RQCTL_MSIX_INDX_S); - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); - rxq++; - } - } - - ice_flush(hw); -} - -/** * ice_ena_misc_vector - enable the non-queue interrupts * @pf: board private structure */ @@ -1708,13 +1286,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf) PFINT_OICR_MAL_DETECT_M | PFINT_OICR_GRST_M | PFINT_OICR_PCI_EXCEPTION_M | + PFINT_OICR_VFLR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_CRITERR_M); wr32(hw, PFINT_OICR_ENA, val); /* SW_ITR_IDX = 0, but don't change INTENA */ - wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), + wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx), GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); } @@ -1731,12 +1310,23 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) u32 oicr, ena_mask; set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); + set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); + if (oicr & PFINT_OICR_MAL_DETECT_M) { + ena_mask &= ~PFINT_OICR_MAL_DETECT_M; + set_bit(__ICE_MDD_EVENT_PENDING, pf->state); + } + if (oicr & PFINT_OICR_VFLR_M) { + ena_mask &= ~PFINT_OICR_VFLR_M; + set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + } + if (oicr & PFINT_OICR_GRST_M) { u32 reset; + /* we have a reset warning */ ena_mask &= ~PFINT_OICR_GRST_M; reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> @@ -1746,15 +1336,18 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) pf->corer_count++; else if (reset == ICE_RESET_GLOBR) pf->globr_count++; - else + else if (reset == ICE_RESET_EMPR) pf->empr_count++; + else + dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n", + reset); /* If a reset cycle isn't already in progress, we set a bit in * pf->state so that the service task can start a reset/rebuild. * We also make note of which reset happened so that peer * devices/drivers can be informed. */ - if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) { + if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) { if (reset == ICE_RESET_CORER) set_bit(__ICE_CORER_RECV, pf->state); else if (reset == ICE_RESET_GLOBR) @@ -1762,7 +1355,20 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) else set_bit(__ICE_EMPR_RECV, pf->state); - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* There are couple of different bits at play here. + * hw->reset_ongoing indicates whether the hardware is + * in reset. This is set to true when a reset interrupt + * is received and set back to false after the driver + * has determined that the hardware is out of reset. + * + * __ICE_RESET_OICR_RECV in pf->state indicates + * that a post reset rebuild is required before the + * driver is operational again. This is set above. + * + * As this is the start of the reset/rebuild cycle, set + * both to indicate that. 
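[Editor's note] The hunk above switches the reset path from test_bit() plus set_bit() to a single test_and_set_bit() on __ICE_RESET_OICR_RECV, so only the first OICR that reports a reset starts the rebuild even if the interrupt fires again before the rebuild finishes. A user-space sketch of that idempotent-latch pattern using C11 atomics (the names here are made up, not the kernel helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag reset_pending = ATOMIC_FLAG_INIT;

/* Returns true only for the caller that actually starts the rebuild. */
static bool start_rebuild_once(void)
{
	/* atomic_flag_test_and_set() plays the role of test_and_set_bit():
	 * it returns the previous value, so "false" means we won the race.
	 */
	return !atomic_flag_test_and_set(&reset_pending);
}

int main(void)
{
	printf("first interrupt starts rebuild: %d\n", start_rebuild_once());
	printf("second interrupt is a no-op:    %d\n", start_rebuild_once());
	return 0;
}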
+ */ + hw->reset_ongoing = true; } } @@ -1803,208 +1409,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } /** - * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors - * @vsi: the VSI being configured - * - * This function maps descriptor rings to the queue-specific vectors allotted - * through the MSI-X enabling code. On a constrained vector budget, we map Tx - * and Rx rings to the vector as "efficiently" as possible. - */ -static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) -{ - int q_vectors = vsi->num_q_vectors; - int tx_rings_rem, rx_rings_rem; - int v_id; - - /* initially assigning remaining rings count to VSIs num queue value */ - tx_rings_rem = vsi->num_txq; - rx_rings_rem = vsi->num_rxq; - - for (v_id = 0; v_id < q_vectors; v_id++) { - struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; - int tx_rings_per_v, rx_rings_per_v, q_id, q_base; - - /* Tx rings mapping to vector */ - tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); - q_vector->num_ring_tx = tx_rings_per_v; - q_vector->tx.ring = NULL; - q_base = vsi->num_txq - tx_rings_rem; - - for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { - struct ice_ring *tx_ring = vsi->tx_rings[q_id]; - - tx_ring->q_vector = q_vector; - tx_ring->next = q_vector->tx.ring; - q_vector->tx.ring = tx_ring; - } - tx_rings_rem -= tx_rings_per_v; - - /* Rx rings mapping to vector */ - rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); - q_vector->num_ring_rx = rx_rings_per_v; - q_vector->rx.ring = NULL; - q_base = vsi->num_rxq - rx_rings_rem; - - for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { - struct ice_ring *rx_ring = vsi->rx_rings[q_id]; - - rx_ring->q_vector = q_vector; - rx_ring->next = q_vector->rx.ring; - q_vector->rx.ring = rx_ring; - } - rx_rings_rem -= rx_rings_per_v; - } -} - -/** - * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - */ -static void ice_vsi_set_num_qs(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - - switch (vsi->type) { - case ICE_VSI_PF: - vsi->alloc_txq = pf->num_lan_tx; - vsi->alloc_rxq = pf->num_lan_rx; - vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); - vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); - break; - default: - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", - vsi->type); - break; - } -} - -/** - * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi - * @vsi: VSI pointer - * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 
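[Editor's note] The removed ice_vsi_map_rings_to_vectors() above spreads num_txq/num_rxq rings over a smaller number of vectors by recomputing DIV_ROUND_UP(remaining rings, remaining vectors) at each step, which keeps the per-vector counts within one of each other. A compilable sketch of just that arithmetic, with the ICE structures left out:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int num_rings = 10, num_vectors = 4;
	int rings_rem = num_rings;
	int v;

	for (v = 0; v < num_vectors; v++) {
		/* remaining rings spread over the remaining vectors */
		int per_v = DIV_ROUND_UP(rings_rem, num_vectors - v);

		printf("vector %d gets %d ring(s)\n", v, per_v);
		rings_rem -= per_v;
	}
	return 0;
}

With 10 rings and 4 vectors this prints 3, 3, 2, 2, which is the "as efficiently as possible" split the removed comment describes.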
- * - * On error: returns error code (negative) - * On success: returns 0 - */ -static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) -{ - struct ice_pf *pf = vsi->back; - - /* allocate memory for both Tx and Rx ring pointers */ - vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, - sizeof(struct ice_ring *), GFP_KERNEL); - if (!vsi->tx_rings) - goto err_txrings; - - vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, - sizeof(struct ice_ring *), GFP_KERNEL); - if (!vsi->rx_rings) - goto err_rxrings; - - if (alloc_qvectors) { - /* allocate memory for q_vector pointers */ - vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, - vsi->num_q_vectors, - sizeof(struct ice_q_vector *), - GFP_KERNEL); - if (!vsi->q_vectors) - goto err_vectors; - } - - return 0; - -err_vectors: - devm_kfree(&pf->pdev->dev, vsi->rx_rings); -err_rxrings: - devm_kfree(&pf->pdev->dev, vsi->tx_rings); -err_txrings: - return -ENOMEM; -} - -/** - * ice_msix_clean_rings - MSIX mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a q_vector - */ -static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) -{ - struct ice_q_vector *q_vector = (struct ice_q_vector *)data; - - if (!q_vector->tx.ring && !q_vector->rx.ring) - return IRQ_HANDLED; - - napi_schedule(&q_vector->napi); - - return IRQ_HANDLED; -} - -/** - * ice_vsi_alloc - Allocates the next available struct vsi in the PF - * @pf: board private structure - * @type: type of VSI - * - * returns a pointer to a VSI on success, NULL on failure. - */ -static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) -{ - struct ice_vsi *vsi = NULL; - - /* Need to protect the allocation of the VSIs at the PF level */ - mutex_lock(&pf->sw_mutex); - - /* If we have already allocated our maximum number of VSIs, - * pf->next_vsi will be ICE_NO_VSI. 
If not, pf->next_vsi index - * is available to be populated - */ - if (pf->next_vsi == ICE_NO_VSI) { - dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); - goto unlock_pf; - } - - vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); - if (!vsi) - goto unlock_pf; - - vsi->type = type; - vsi->back = pf; - set_bit(__ICE_DOWN, vsi->state); - vsi->idx = pf->next_vsi; - vsi->work_lmt = ICE_DFLT_IRQ_WORK; - - ice_vsi_set_num_qs(vsi); - - switch (vsi->type) { - case ICE_VSI_PF: - if (ice_vsi_alloc_arrays(vsi, true)) - goto err_rings; - - /* Setup default MSIX irq handler for VSI */ - vsi->irq_handler = ice_msix_clean_rings; - break; - default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); - goto unlock_pf; - } - - /* fill VSI slot in the PF struct */ - pf->vsi[pf->next_vsi] = vsi; - - /* prepare pf->next_vsi for next use */ - pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, - pf->next_vsi); - goto unlock_pf; - -err_rings: - devm_kfree(&pf->pdev->dev, vsi); - vsi = NULL; -unlock_pf: - mutex_unlock(&pf->sw_mutex); - return vsi; -} - -/** * ice_free_irq_msix_misc - Unroll misc vector setup * @pf: board private structure */ @@ -2015,12 +1419,15 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) ice_flush(&pf->hw); if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { - synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); + synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector); devm_free_irq(&pf->pdev->dev, - pf->msix_entries[pf->oicr_idx].vector, pf); + pf->msix_entries[pf->sw_oicr_idx].vector, pf); } - ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); + pf->num_avail_sw_msix += 1; + ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID); + pf->num_avail_hw_msix += 1; + ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID); } /** @@ -2047,42 +1454,61 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) * lost during reset. Note that this function is called only during * rebuild path and not while reset is in progress. 
*/ - if (ice_is_reset_recovery_pending(pf->state)) + if (ice_is_reset_in_progress(pf->state)) goto skip_req_irq; - /* reserve one vector in irq_tracker for misc interrupts */ - oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); + /* reserve one vector in sw_irq_tracker for misc interrupts */ + oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); if (oicr_idx < 0) return oicr_idx; - pf->oicr_idx = oicr_idx; + pf->num_avail_sw_msix -= 1; + pf->sw_oicr_idx = oicr_idx; + + /* reserve one vector in hw_irq_tracker for misc interrupts */ + oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + if (oicr_idx < 0) { + ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + pf->num_avail_sw_msix += 1; + return oicr_idx; + } + pf->num_avail_hw_msix -= 1; + pf->hw_oicr_idx = oicr_idx; err = devm_request_irq(&pf->pdev->dev, - pf->msix_entries[pf->oicr_idx].vector, + pf->msix_entries[pf->sw_oicr_idx].vector, ice_misc_intr, 0, pf->int_name, pf); if (err) { dev_err(&pf->pdev->dev, "devm_request_irq for %s failed: %d\n", pf->int_name, err); - ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); + ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + pf->num_avail_sw_msix += 1; + ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + pf->num_avail_hw_msix += 1; return err; } skip_req_irq: ice_ena_misc_vector(pf); - val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | + val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | PFINT_OICR_CTL_CAUSE_ENA_M); wr32(hw, PFINT_OICR_CTL, val); /* This enables Admin queue Interrupt causes */ - val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | + val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | PFINT_FW_CTL_CAUSE_ENA_M); wr32(hw, PFINT_FW_CTL, val); - itr_gran = hw->itr_gran_200; + /* This enables Mailbox queue Interrupt causes */ + val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) | + PFINT_MBX_CTL_CAUSE_ENA_M); + wr32(hw, PFINT_MBX_CTL, val); + + itr_gran = hw->itr_gran; - wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), + wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), ITR_TO_REG(ICE_ITR_8K, itr_gran)); ice_flush(hw); @@ -2092,209 +1518,43 @@ skip_req_irq: } /** - * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI - * @vsi: the VSI getting queues - * - * Return 0 on success and a negative value on error - */ -static int ice_vsi_get_qs_contig(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int offset, ret = 0; - - mutex_lock(&pf->avail_q_mutex); - /* look for contiguous block of queues for tx */ - offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS, - 0, vsi->alloc_txq, 0); - if (offset < ICE_MAX_TXQS) { - int i; - - bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq); - for (i = 0; i < vsi->alloc_txq; i++) - vsi->txq_map[i] = i + offset; - } else { - ret = -ENOMEM; - vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER; - } - - /* look for contiguous block of queues for rx */ - offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS, - 0, vsi->alloc_rxq, 0); - if (offset < ICE_MAX_RXQS) { - int i; - - bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq); - for (i = 0; i < vsi->alloc_rxq; i++) - vsi->rxq_map[i] = i + offset; - } else { - ret = -ENOMEM; - vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER; - } - mutex_unlock(&pf->avail_q_mutex); - - return ret; -} - -/** - * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI - * @vsi: the VSI getting queues - * - * Return 0 on success and a negative value on error + * ice_napi_del - Remove NAPI handler for 
the VSI + * @vsi: VSI for which NAPI handler is to be removed */ -static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) +static void ice_napi_del(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; - int i, index = 0; - - mutex_lock(&pf->avail_q_mutex); - - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) { - for (i = 0; i < vsi->alloc_txq; i++) { - index = find_next_zero_bit(pf->avail_txqs, - ICE_MAX_TXQS, index); - if (index < ICE_MAX_TXQS) { - set_bit(index, pf->avail_txqs); - vsi->txq_map[i] = index; - } else { - goto err_scatter_tx; - } - } - } - - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) { - for (i = 0; i < vsi->alloc_rxq; i++) { - index = find_next_zero_bit(pf->avail_rxqs, - ICE_MAX_RXQS, index); - if (index < ICE_MAX_RXQS) { - set_bit(index, pf->avail_rxqs); - vsi->rxq_map[i] = index; - } else { - goto err_scatter_rx; - } - } - } - - mutex_unlock(&pf->avail_q_mutex); - return 0; + int v_idx; -err_scatter_rx: - /* unflag any queues we have grabbed (i is failed position) */ - for (index = 0; index < i; index++) { - clear_bit(vsi->rxq_map[index], pf->avail_rxqs); - vsi->rxq_map[index] = 0; - } - i = vsi->alloc_txq; -err_scatter_tx: - /* i is either position of failed attempt or vsi->alloc_txq */ - for (index = 0; index < i; index++) { - clear_bit(vsi->txq_map[index], pf->avail_txqs); - vsi->txq_map[index] = 0; - } + if (!vsi->netdev) + return; - mutex_unlock(&pf->avail_q_mutex); - return -ENOMEM; + for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + netif_napi_del(&vsi->q_vectors[v_idx]->napi); } /** - * ice_vsi_get_qs - Assign queues from PF to VSI - * @vsi: the VSI to assign queues to + * ice_napi_add - register NAPI handler for the VSI + * @vsi: VSI for which NAPI handler is to be registered * - * Returns 0 on success and a negative value on error - */ -static int ice_vsi_get_qs(struct ice_vsi *vsi) -{ - int ret = 0; - - vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; - vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; - - /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping - * modes individually to scatter if assigning contiguous queues - * to rx or tx fails - */ - ret = ice_vsi_get_qs_contig(vsi); - if (ret < 0) { - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) - vsi->alloc_txq = max_t(u16, vsi->alloc_txq, - ICE_MAX_SCATTER_TXQS); - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) - vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq, - ICE_MAX_SCATTER_RXQS); - ret = ice_vsi_get_qs_scatter(vsi); - } - - return ret; -} - -/** - * ice_vsi_put_qs - Release queues from VSI to PF - * @vsi: the VSI thats going to release queues - */ -static void ice_vsi_put_qs(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int i; - - mutex_lock(&pf->avail_q_mutex); - - for (i = 0; i < vsi->alloc_txq; i++) { - clear_bit(vsi->txq_map[i], pf->avail_txqs); - vsi->txq_map[i] = ICE_INVAL_Q_INDEX; - } - - for (i = 0; i < vsi->alloc_rxq; i++) { - clear_bit(vsi->rxq_map[i], pf->avail_rxqs); - vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; - } - - mutex_unlock(&pf->avail_q_mutex); -} - -/** - * ice_free_q_vector - Free memory allocated for a specific interrupt vector - * @vsi: VSI having the memory freed - * @v_idx: index of the vector to be freed + * This function is only called in the driver's load path. Registering the NAPI + * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, + * reset/rebuild, etc.) 
*/ -static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) +static void ice_napi_add(struct ice_vsi *vsi) { - struct ice_q_vector *q_vector; - struct ice_ring *ring; + int v_idx; - if (!vsi->q_vectors[v_idx]) { - dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n", - v_idx); + if (!vsi->netdev) return; - } - q_vector = vsi->q_vectors[v_idx]; - - ice_for_each_ring(ring, q_vector->tx) - ring->q_vector = NULL; - ice_for_each_ring(ring, q_vector->rx) - ring->q_vector = NULL; - - /* only VSI with an associated netdev is set up with NAPI */ - if (vsi->netdev) - netif_napi_del(&q_vector->napi); - - devm_kfree(&vsi->back->pdev->dev, q_vector); - vsi->q_vectors[v_idx] = NULL; -} - -/** - * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors - * @vsi: the VSI having memory freed - */ -static void ice_vsi_free_q_vectors(struct ice_vsi *vsi) -{ - int v_idx; for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) - ice_free_q_vector(vsi, v_idx); + netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, + ice_napi_poll, NAPI_POLL_WEIGHT); } /** - * ice_cfg_netdev - Setup the netdev flags - * @vsi: the VSI being configured + * ice_cfg_netdev - Allocate, configure and register a netdev + * @vsi: the VSI associated with the new netdev * * Returns 0 on success, negative value on failure */ @@ -2307,6 +1567,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) struct ice_netdev_priv *np; struct net_device *netdev; u8 mac_addr[ETH_ALEN]; + int err; netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv), vsi->alloc_txq, vsi->alloc_rxq); @@ -2364,195 +1625,14 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = ICE_MAX_MTU; - return 0; -} - -/** - * ice_vsi_free_arrays - clean up vsi resources - * @vsi: pointer to VSI being cleared - * @free_qvectors: bool to specify if q_vectors should be deallocated - */ -static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors) -{ - struct ice_pf *pf = vsi->back; - - /* free the ring and vector containers */ - if (free_qvectors && vsi->q_vectors) { - devm_kfree(&pf->pdev->dev, vsi->q_vectors); - vsi->q_vectors = NULL; - } - if (vsi->tx_rings) { - devm_kfree(&pf->pdev->dev, vsi->tx_rings); - vsi->tx_rings = NULL; - } - if (vsi->rx_rings) { - devm_kfree(&pf->pdev->dev, vsi->rx_rings); - vsi->rx_rings = NULL; - } -} - -/** - * ice_vsi_clear - clean up and deallocate the provided vsi - * @vsi: pointer to VSI being cleared - * - * This deallocates the vsi's queue resources, removes it from the PF's - * VSI array if necessary, and deallocates the VSI - * - * Returns 0 on success, negative on failure - */ -static int ice_vsi_clear(struct ice_vsi *vsi) -{ - struct ice_pf *pf = NULL; - - if (!vsi) - return 0; - - if (!vsi->back) - return -EINVAL; - - pf = vsi->back; - - if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { - dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", - vsi->idx); - return -EINVAL; - } - - mutex_lock(&pf->sw_mutex); - /* updates the PF for this cleared vsi */ - - pf->vsi[vsi->idx] = NULL; - if (vsi->idx < pf->next_vsi) - pf->next_vsi = vsi->idx; - - ice_vsi_free_arrays(vsi, true); - mutex_unlock(&pf->sw_mutex); - devm_kfree(&pf->pdev->dev, vsi); - - return 0; -} - -/** - * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector - * @vsi: the VSI being configured - * @v_idx: index of the vector in the vsi struct - * - * We allocate one q_vector. If allocation fails we return -ENOMEM. 
- */ -static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) -{ - struct ice_pf *pf = vsi->back; - struct ice_q_vector *q_vector; - - /* allocate q_vector */ - q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) - return -ENOMEM; - - q_vector->vsi = vsi; - q_vector->v_idx = v_idx; - /* only set affinity_mask if the CPU is online */ - if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); - - if (vsi->netdev) - netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, - NAPI_POLL_WEIGHT); - /* tie q_vector and vsi together */ - vsi->q_vectors[v_idx] = q_vector; - - return 0; -} - -/** - * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors - * @vsi: the VSI being configured - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. - */ -static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int v_idx = 0, num_q_vectors; - int err; - - if (vsi->q_vectors[0]) { - dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", - vsi->vsi_num); - return -EEXIST; - } - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - num_q_vectors = vsi->num_q_vectors; - } else { - err = -EINVAL; - goto err_out; - } - - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - err = ice_vsi_alloc_q_vector(vsi, v_idx); - if (err) - goto err_out; - } - - return 0; - -err_out: - while (v_idx--) - ice_free_q_vector(vsi, v_idx); - - dev_err(&pf->pdev->dev, - "Failed to allocate %d q_vector for VSI %d, ret=%d\n", - vsi->num_q_vectors, vsi->vsi_num, err); - vsi->num_q_vectors = 0; - return err; -} - -/** - * ice_vsi_setup_vector_base - Set up the base vector for the given VSI - * @vsi: ptr to the VSI - * - * This should only be called after ice_vsi_alloc() which allocates the - * corresponding SW VSI structure and initializes num_queue_pairs for the - * newly allocated VSI. 
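[Editor's note] The removed ice_vsi_alloc_q_vectors() above uses the common kernel idiom of unwinding partial progress with "while (v_idx--)" when the i-th allocation fails, so exactly the vectors that were created get freed. A self-contained sketch of that rollback pattern with plain malloc/free (the struct here is a stand-in, not the driver's q_vector):

#include <stdlib.h>
#include <stdio.h>

struct q_vector { int idx; };

static int alloc_vectors(struct q_vector **vecs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		vecs[i] = calloc(1, sizeof(**vecs));
		if (!vecs[i])
			goto err_out;
		vecs[i]->idx = i;
	}
	return 0;

err_out:
	/* free only the vectors that were actually allocated */
	while (i--) {
		free(vecs[i]);
		vecs[i] = NULL;
	}
	return -1;
}

int main(void)
{
	struct q_vector *vecs[4] = { 0 };

	if (alloc_vectors(vecs, 4))
		return 1;
	printf("allocated %d q_vectors\n", 4);
	for (int i = 0; i < 4; i++)
		free(vecs[i]);
	return 0;
}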
- * - * Returns 0 on success or negative on failure - */ -static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int num_q_vectors = 0; - - if (vsi->base_vector) { - dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", - vsi->vsi_num, vsi->base_vector); - return -EEXIST; - } - - if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return -ENOENT; - - switch (vsi->type) { - case ICE_VSI_PF: - num_q_vectors = vsi->num_q_vectors; - break; - default: - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", - vsi->type); - break; - } + err = register_netdev(vsi->netdev); + if (err) + return err; - if (num_q_vectors) - vsi->base_vector = ice_get_res(pf, pf->irq_tracker, - num_q_vectors, vsi->idx); + netif_carrier_off(vsi->netdev); - if (vsi->base_vector < 0) { - dev_err(&pf->pdev->dev, - "Failed to get tracking for %d vectors for VSI %d, err=%d\n", - num_q_vectors, vsi->vsi_num, vsi->base_vector); - return -ENOENT; - } + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(vsi->netdev); return 0; } @@ -2572,327 +1652,17 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) } /** - * ice_vsi_cfg_rss - Configure RSS params for a VSI - * @vsi: VSI to be configured - */ -static int ice_vsi_cfg_rss(struct ice_vsi *vsi) -{ - u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; - struct ice_aqc_get_set_rss_keys *key; - struct ice_pf *pf = vsi->back; - enum ice_status status; - int err = 0; - u8 *lut; - - vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); - - lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); - if (!lut) - return -ENOMEM; - - if (vsi->rss_lut_user) - memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); - else - ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); - - status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type, - lut, vsi->rss_table_size); - - if (status) { - dev_err(&vsi->back->pdev->dev, - "set_rss_lut failed, error %d\n", status); - err = -EIO; - goto ice_vsi_cfg_rss_exit; - } - - key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL); - if (!key) { - err = -ENOMEM; - goto ice_vsi_cfg_rss_exit; - } - - if (vsi->rss_hkey_user) - memcpy(seed, vsi->rss_hkey_user, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); - else - netdev_rss_key_fill((void *)seed, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); - memcpy(&key->standard_rss_key, seed, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); - - status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key); - - if (status) { - dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n", - status); - err = -EIO; - } - - devm_kfree(&pf->pdev->dev, key); -ice_vsi_cfg_rss_exit: - devm_kfree(&pf->pdev->dev, lut); - return err; -} - -/** - * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI - * @vsi: pointer to the ice_vsi - * - * This reallocates the VSIs queue resources - * - * Returns 0 on success and negative value on failure - */ -static int ice_vsi_reinit_setup(struct ice_vsi *vsi) -{ - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - int ret, i; - - if (!vsi) - return -EINVAL; - - ice_vsi_free_q_vectors(vsi); - ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); - vsi->base_vector = 0; - ice_vsi_clear_rings(vsi); - ice_vsi_free_arrays(vsi, false); - ice_vsi_set_num_qs(vsi); - - /* Initialize VSI struct elements and create VSI in FW */ - ret = ice_vsi_add(vsi); - if (ret < 0) - goto err_vsi; - - ret = ice_vsi_alloc_arrays(vsi, false); - if (ret < 0) - goto 
err_vsi; - - switch (vsi->type) { - case ICE_VSI_PF: - if (!vsi->netdev) { - ret = ice_cfg_netdev(vsi); - if (ret) - goto err_rings; - - ret = register_netdev(vsi->netdev); - if (ret) - goto err_rings; - - netif_carrier_off(vsi->netdev); - netif_tx_stop_all_queues(vsi->netdev); - } - - ret = ice_vsi_alloc_q_vectors(vsi); - if (ret) - goto err_rings; - - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; - - ret = ice_vsi_alloc_rings(vsi); - if (ret) - goto err_vectors; - - ice_vsi_map_rings_to_vectors(vsi); - break; - default: - break; - } - - ice_vsi_set_tc_cfg(vsi); - - /* configure VSI nodes based on number of queues and TC's */ - for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = vsi->num_txq; - - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, - vsi->tc_cfg.ena_tc, max_txqs); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "Failed VSI lan queue config\n"); - goto err_vectors; - } - return 0; - -err_vectors: - ice_vsi_free_q_vectors(vsi); -err_rings: - if (vsi->netdev) { - vsi->current_netdev_flags = 0; - unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; - } -err_vsi: - ice_vsi_clear(vsi); - set_bit(__ICE_RESET_FAILED, vsi->back->state); - return ret; -} - -/** - * ice_vsi_setup - Set up a VSI by a given type + * ice_pf_vsi_setup - Set up a PF VSI * @pf: board private structure - * @type: VSI type * @pi: pointer to the port_info instance * - * This allocates the sw VSI structure and its queue resources. - * - * Returns pointer to the successfully allocated and configure VSI sw struct on - * success, otherwise returns NULL on failure. + * Returns pointer to the successfully allocated VSI sw struct on success, + * otherwise returns NULL on failure. */ static struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type, - struct ice_port_info *pi) -{ - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - struct device *dev = &pf->pdev->dev; - struct ice_vsi_ctx ctxt = { 0 }; - struct ice_vsi *vsi; - int ret, i; - - vsi = ice_vsi_alloc(pf, type); - if (!vsi) { - dev_err(dev, "could not allocate VSI\n"); - return NULL; - } - - vsi->port_info = pi; - vsi->vsw = pf->first_sw; - - if (ice_vsi_get_qs(vsi)) { - dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", - vsi->idx); - goto err_get_qs; - } - - /* set RSS capabilities */ - ice_vsi_set_rss_params(vsi); - - /* create the VSI */ - ret = ice_vsi_add(vsi); - if (ret) - goto err_vsi; - - ctxt.vsi_num = vsi->vsi_num; - - switch (vsi->type) { - case ICE_VSI_PF: - ret = ice_cfg_netdev(vsi); - if (ret) - goto err_cfg_netdev; - - ret = register_netdev(vsi->netdev); - if (ret) - goto err_register_netdev; - - netif_carrier_off(vsi->netdev); - - /* make sure transmit queues start off as stopped */ - netif_tx_stop_all_queues(vsi->netdev); - ret = ice_vsi_alloc_q_vectors(vsi); - if (ret) - goto err_msix; - - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_rings; - - ret = ice_vsi_alloc_rings(vsi); - if (ret) - goto err_rings; - - ice_vsi_map_rings_to_vectors(vsi); - - /* Do not exit if configuring RSS had an issue, at least - * receive traffic on first queue. 
Hence no need to capture - * return value - */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_vsi_cfg_rss(vsi); - break; - default: - /* if vsi type is not recognized, clean up the resources and - * exit - */ - goto err_rings; - } - - ice_vsi_set_tc_cfg(vsi); - - /* configure VSI nodes based on number of queues and TC's */ - for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = vsi->num_txq; - - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num, - vsi->tc_cfg.ena_tc, max_txqs); - if (ret) { - dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); - goto err_rings; - } - - return vsi; - -err_rings: - ice_vsi_free_q_vectors(vsi); -err_msix: - if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) - unregister_netdev(vsi->netdev); -err_register_netdev: - if (vsi->netdev) { - free_netdev(vsi->netdev); - vsi->netdev = NULL; - } -err_cfg_netdev: - ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); - if (ret) - dev_err(&vsi->back->pdev->dev, - "Free VSI AQ call failed, err %d\n", ret); -err_vsi: - ice_vsi_put_qs(vsi); -err_get_qs: - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; - ice_vsi_clear(vsi); - - return NULL; -} - -/** - * ice_vsi_add_vlan - Add vsi membership for given vlan - * @vsi: the vsi being configured - * @vid: vlan id to be added - */ -static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) +ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - struct ice_fltr_list_entry *tmp; - struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); - enum ice_status status; - int err = 0; - - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.flag = ICE_FLTR_TX; - tmp->fltr_info.src = vsi->vsi_num; - tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num; - tmp->fltr_info.l_data.vlan.vlan_id = vid; - - INIT_LIST_HEAD(&tmp->list_entry); - list_add(&tmp->list_entry, &tmp_add_list); - - status = ice_add_vlan(&pf->hw, &tmp_add_list); - if (status) { - err = -ENODEV; - dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", - vid, vsi->vsi_num); - } - - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); - return err; + return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); } /** @@ -2908,7 +1678,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - int ret = 0; + int ret; if (vid >= VLAN_N_VID) { netdev_err(netdev, "VLAN id requested %d is out of range %d\n", @@ -2919,6 +1689,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, if (vsi->info.pvid) return -EINVAL; + /* Enable VLAN pruning when VLAN 0 is added */ + if (unlikely(!vid)) { + ret = ice_cfg_vlan_pruning(vsi, true); + if (ret) + return ret; + } + /* Add all VLAN ids including 0 to the switch filter. 
VLAN id 0 is * needed to continue allowing all untagged packets since VLAN prune * list is applied to all packets by the switch @@ -2932,38 +1709,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, } /** - * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN - * @vsi: the VSI being configured - * @vid: VLAN id to be removed - */ -static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) -{ - struct ice_fltr_list_entry *list; - struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); - - list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); - if (!list) - return; - - list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; - list->fltr_info.fwd_id.vsi_id = vsi->vsi_num; - list->fltr_info.fltr_act = ICE_FWD_TO_VSI; - list->fltr_info.l_data.vlan.vlan_id = vid; - list->fltr_info.flag = ICE_FLTR_TX; - list->fltr_info.src = vsi->vsi_num; - - INIT_LIST_HEAD(&list->list_entry); - list_add(&list->list_entry, &tmp_add_list); - - if (ice_remove_vlan(&pf->hw, &tmp_add_list)) - dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", - vid, vsi->vsi_num); - - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); -} - -/** * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload * @netdev: network interface to be adjusted * @proto: unused protocol @@ -2976,19 +1721,25 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + int status; if (vsi->info.pvid) return -EINVAL; - /* return code is ignored as there is nothing a user - * can do about failure to remove and a log message was - * already printed from the other function + /* Make sure ice_vsi_kill_vlan is successful before updating VLAN + * information */ - ice_vsi_kill_vlan(vsi, vid); + status = ice_vsi_kill_vlan(vsi, vid); + if (status) + return status; clear_bit(vid, vsi->active_vlans); - return 0; + /* Disable VLAN pruning when VLAN 0 is removed */ + if (unlikely(!vid)) + status = ice_cfg_vlan_pruning(vsi, false); + + return status; } /** @@ -3004,59 +1755,73 @@ static int ice_setup_pf_sw(struct ice_pf *pf) struct ice_vsi *vsi; int status = 0; - if (!ice_is_reset_recovery_pending(pf->state)) { - vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info); - if (!vsi) { - status = -ENOMEM; - goto error_exit; - } - } else { - vsi = pf->vsi[0]; - status = ice_vsi_reinit_setup(vsi); - if (status < 0) - return -EIO; + if (ice_is_reset_in_progress(pf->state)) + return -EBUSY; + + vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); + if (!vsi) { + status = -ENOMEM; + goto unroll_vsi_setup; + } + + status = ice_cfg_netdev(vsi); + if (status) { + status = -ENODEV; + goto unroll_vsi_setup; } - /* tmp_add_list contains a list of MAC addresses for which MAC - * filters need to be programmed. Add the VSI's unicast MAC to - * this list + /* registering the NAPI handler requires both the queues and + * netdev to be created, which are done in ice_pf_vsi_setup() + * and ice_cfg_netdev() respectively + */ + ice_napi_add(vsi); + + /* To add a MAC filter, first add the MAC to a list and then + * pass the list to ice_add_mac. */ + + /* Add a unicast MAC filter so the VSI can get its packets */ status = ice_add_mac_to_list(vsi, &tmp_add_list, vsi->port_info->mac.perm_addr); if (status) - goto error_exit; + goto unroll_napi_add; /* VSI needs to receive broadcast traffic, so add the broadcast - * MAC address to the list. + * MAC address to the list as well. 
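[Editor's note] The reworked error path in ice_setup_pf_sw() in this hunk replaces the single error_exit label with one label per setup stage (free_mac_list, unroll_napi_add, unroll_vsi_setup), so a failure tears down only what has already been built. A compact illustration of that layered-unwind style; the stage names below are made up:

#include <stdio.h>

static int setup_stages(void)
{
	int err;

	err = 0;		/* stage A: e.g. set up the VSI      */
	if (err)
		goto out;
	err = 0;		/* stage B: e.g. configure the netdev */
	if (err)
		goto undo_a;
	err = -1;		/* stage C: pretend this one fails    */
	if (err)
		goto undo_b;
	return 0;

undo_b:
	puts("undo stage B");
undo_a:
	puts("undo stage A");
out:
	return err;
}

int main(void)
{
	printf("setup_stages() = %d\n", setup_stages());
	return 0;
}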
*/ eth_broadcast_addr(broadcast); status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); if (status) - goto error_exit; + goto free_mac_list; /* program MAC filters for entries in tmp_add_list */ status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) { dev_err(&pf->pdev->dev, "Could not add MAC filters\n"); status = -ENOMEM; - goto error_exit; + goto free_mac_list; } ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status; -error_exit: +free_mac_list: ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); +unroll_napi_add: if (vsi) { - ice_vsi_free_q_vectors(vsi); - if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) - unregister_netdev(vsi->netdev); + ice_napi_del(vsi); if (vsi->netdev) { + if (vsi->netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; } + } +unroll_vsi_setup: + if (vsi) { + ice_vsi_free_q_vectors(vsi); ice_vsi_delete(vsi); ice_vsi_put_qs(vsi); pf->q_left_tx += vsi->alloc_txq; @@ -3097,10 +1862,7 @@ static void ice_determine_q_usage(struct ice_pf *pf) */ static void ice_deinit_pf(struct ice_pf *pf) { - if (pf->serv_tmr.function) - del_timer_sync(&pf->serv_tmr); - if (pf->serv_task.func) - cancel_work_sync(&pf->serv_task); + ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->avail_q_mutex); } @@ -3113,6 +1875,15 @@ static void ice_init_pf(struct ice_pf *pf) { bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); set_bit(ICE_FLAG_MSIX_ENA, pf->flags); +#ifdef CONFIG_PCI_IOV + if (pf->hw.func_caps.common_cap.sr_iov_1_1) { + struct ice_hw *hw = &pf->hw; + + set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); + pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs, + ICE_MAX_VF_COUNT); + } +#endif /* CONFIG_PCI_IOV */ mutex_init(&pf->sw_mutex); mutex_init(&pf->avail_q_mutex); @@ -3155,6 +1926,7 @@ static int ice_ena_msix_range(struct ice_pf *pf) /* reserve vectors for LAN traffic */ pf->num_lan_msix = min_t(int, num_online_cpus(), v_left); v_budget += pf->num_lan_msix; + v_left -= pf->num_lan_msix; pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -3182,10 +1954,11 @@ static int ice_ena_msix_range(struct ice_pf *pf) "not enough vectors. 
requested = %d, obtained = %d\n", v_budget, v_actual); if (v_actual >= (pf->num_lan_msix + 1)) { - pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1); + pf->num_avail_sw_msix = v_actual - + (pf->num_lan_msix + 1); } else if (v_actual >= 2) { pf->num_lan_msix = 1; - pf->num_avail_msix = v_actual - 2; + pf->num_avail_sw_msix = v_actual - 2; } else { pci_disable_msix(pf->pdev); err = -ERANGE; @@ -3218,12 +1991,32 @@ static void ice_dis_msix(struct ice_pf *pf) } /** + * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme + * @pf: board private structure + */ +static void ice_clear_interrupt_scheme(struct ice_pf *pf) +{ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + ice_dis_msix(pf); + + if (pf->sw_irq_tracker) { + devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker); + pf->sw_irq_tracker = NULL; + } + + if (pf->hw_irq_tracker) { + devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker); + pf->hw_irq_tracker = NULL; + } +} + +/** * ice_init_interrupt_scheme - Determine proper interrupt scheme * @pf: board private structure to initialize */ static int ice_init_interrupt_scheme(struct ice_pf *pf) { - int vectors = 0; + int vectors = 0, hw_vectors = 0; ssize_t size; if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) @@ -3237,30 +2030,31 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) /* set up vector assignment tracking */ size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors); - pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); - if (!pf->irq_tracker) { + pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); + if (!pf->sw_irq_tracker) { ice_dis_msix(pf); return -ENOMEM; } - pf->irq_tracker->num_entries = vectors; + /* populate SW interrupts pool with number of OS granted IRQs. */ + pf->num_avail_sw_msix = vectors; + pf->sw_irq_tracker->num_entries = vectors; - return 0; -} + /* set up HW vector assignment tracking */ + hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; + size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors); -/** - * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme - * @pf: board private structure - */ -static void ice_clear_interrupt_scheme(struct ice_pf *pf) -{ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_dis_msix(pf); - - if (pf->irq_tracker) { - devm_kfree(&pf->pdev->dev, pf->irq_tracker); - pf->irq_tracker = NULL; + pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); + if (!pf->hw_irq_tracker) { + ice_clear_interrupt_scheme(pf); + return -ENOMEM; } + + /* populate HW interrupts pool with number of HW supported irqs. */ + pf->num_avail_hw_msix = hw_vectors; + pf->hw_irq_tracker->num_entries = hw_vectors; + + return 0; } /** @@ -3307,6 +2101,8 @@ static int ice_probe(struct pci_dev *pdev, pf->pdev = pdev; pci_set_drvdata(pdev, pf); set_bit(__ICE_DOWN, pf->state); + /* Disable service task until DOWN bit is cleared */ + set_bit(__ICE_SERVICE_DIS, pf->state); hw = &pf->hw; hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; @@ -3364,6 +2160,9 @@ static int ice_probe(struct pci_dev *pdev, goto err_init_interrupt_unroll; } + /* Driver is mostly up */ + clear_bit(__ICE_DOWN, pf->state); + /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. 
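[Editor's note] ice_init_interrupt_scheme() above now sizes each IRQ tracker as sizeof(struct ice_res_tracker) plus one u16 per vector, i.e. a structure ending in a flexible array, and allocates one tracker for the OS-granted (SW) vectors and one for the HW-supported vectors. A user-space sketch of that allocation pattern; the tracker layout below is a stand-in, not the driver's definition:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct res_tracker {
	uint16_t num_entries;
	uint16_t list[];	/* one slot per MSI-X vector */
};

static struct res_tracker *tracker_alloc(uint16_t vectors)
{
	struct res_tracker *t;

	t = calloc(1, sizeof(*t) + vectors * sizeof(t->list[0]));
	if (!t)
		return NULL;
	t->num_entries = vectors;
	return t;
}

int main(void)
{
	struct res_tracker *sw = tracker_alloc(16);

	if (!sw)
		return 1;
	printf("tracker holds %u entries\n", sw->num_entries);
	free(sw);
	return 0;
}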
In case of legacy and MSI * the misc functionality and queue processing is combined in @@ -3386,7 +2185,11 @@ static int ice_probe(struct pci_dev *pdev, goto err_msix_misc_unroll; } - pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + if (hw->evb_veb) + pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + else + pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; + pf->first_sw->pf = pf; /* record the sw_id available for later use */ @@ -3399,8 +2202,7 @@ static int ice_probe(struct pci_dev *pdev, goto err_alloc_sw_unroll; } - /* Driver is mostly up */ - clear_bit(__ICE_DOWN, pf->state); + clear_bit(__ICE_SERVICE_DIS, pf->state); /* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); @@ -3414,6 +2216,7 @@ static int ice_probe(struct pci_dev *pdev, return 0; err_alloc_sw_unroll: + set_bit(__ICE_SERVICE_DIS, pf->state); set_bit(__ICE_DOWN, pf->state); devm_kfree(&pf->pdev->dev, pf->first_sw); err_msix_misc_unroll: @@ -3436,25 +2239,23 @@ err_exit_unroll: static void ice_remove(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); - int i = 0; - int err; + int i; if (!pf) return; set_bit(__ICE_DOWN, pf->state); + ice_service_task_stop(pf); - for (i = 0; i < pf->num_alloc_vsi; i++) { + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) + ice_free_vfs(pf); + ice_vsi_release_all(pf); + ice_free_irq_msix_misc(pf); + ice_for_each_vsi(pf, i) { if (!pf->vsi[i]) continue; - - err = ice_vsi_release(pf->vsi[i]); - if (err) - dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n", - i, err); + ice_vsi_free_q_vectors(pf->vsi[i]); } - - ice_free_irq_msix_misc(pf); ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); @@ -3473,8 +2274,6 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 }, /* required last entry */ { 0, } }; @@ -3485,6 +2284,7 @@ static struct pci_driver ice_driver = { .id_table = ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, + .sriov_configure = ice_sriov_configure, }; /** @@ -3500,7 +2300,7 @@ static int __init ice_module_init(void) pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); pr_info("%s\n", ice_copyright); - ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; @@ -3562,7 +2362,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) } if (test_bit(__ICE_DOWN, pf->state) || - ice_is_reset_recovery_pending(pf->state)) { + ice_is_reset_in_progress(pf->state)) { netdev_err(netdev, "can't set mac %pM. device not ready\n", mac); return -EBUSY; @@ -3722,78 +2522,6 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], } /** - * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx - * @vsi: the vsi being changed - */ -static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) -{ - struct device *dev = &vsi->back->pdev->dev; - struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; - enum ice_status status; - - /* Here we are configuring the VSI to let the driver add VLAN tags by - * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. 
The actual VLAN tag - * insertion happens in the Tx hot path, in ice_tx_map. - */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; - - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - ctxt.vsi_num = vsi->vsi_num; - - status = ice_aq_update_vsi(hw, &ctxt, NULL); - if (status) { - dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", - status, hw->adminq.sq_last_status); - return -EIO; - } - - vsi->info.vlan_flags = ctxt.info.vlan_flags; - return 0; -} - -/** - * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx - * @vsi: the vsi being changed - * @ena: boolean value indicating if this is a enable or disable request - */ -static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) -{ - struct device *dev = &vsi->back->pdev->dev; - struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; - enum ice_status status; - - /* Here we are configuring what the VSI should do with the VLAN tag in - * the Rx packet. We can either leave the tag in the packet or put it in - * the Rx descriptor. - */ - if (ena) { - /* Strip VLAN tag from Rx packet and put it in the desc */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; - } else { - /* Disable stripping. Leave tag in packet */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; - } - - /* Allow all packets untagged/tagged */ - ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; - - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - ctxt.vsi_num = vsi->vsi_num; - - status = ice_aq_update_vsi(hw, &ctxt, NULL); - if (status) { - dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n", - ena, status, hw->adminq.sq_last_status); - return -EIO; - } - - vsi->info.vlan_flags = ctxt.info.vlan_flags; - return 0; -} - -/** * ice_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting @@ -3805,6 +2533,12 @@ static int ice_set_features(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; int ret = 0; + if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) + ret = ice_vsi_manage_rss_lut(vsi, true); + else if (!(features & NETIF_F_RXHASH) && + netdev->features & NETIF_F_RXHASH) + ret = ice_vsi_manage_rss_lut(vsi, false); + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) ret = ice_vsi_manage_vlan_stripping(vsi, true); @@ -3863,248 +2597,6 @@ static int ice_restore_vlan(struct ice_vsi *vsi) } /** - * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance - * @ring: The Tx ring to configure - * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized - * @pf_q: queue index in the PF space - * - * Configure the Tx descriptor ring in TLAN context. 
- */ -static void -ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) -{ - struct ice_vsi *vsi = ring->vsi; - struct ice_hw *hw = &vsi->back->hw; - - tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; - - tlan_ctx->port_num = vsi->port_info->lport; - - /* Transmit Queue Length */ - tlan_ctx->qlen = ring->count; - - /* PF number */ - tlan_ctx->pf_num = hw->pf_id; - - /* queue belongs to a specific VSI type - * VF / VM index should be programmed per vmvf_type setting: - * for vmvf_type = VF, it is VF number between 0-256 - * for vmvf_type = VM, it is VM number between 0-767 - * for PF or EMP this field should be set to zero - */ - switch (vsi->type) { - case ICE_VSI_PF: - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; - break; - default: - return; - } - - /* make sure the context is associated with the right VSI */ - tlan_ctx->src_vsi = vsi->vsi_num; - - tlan_ctx->tso_ena = ICE_TX_LEGACY; - tlan_ctx->tso_qnum = pf_q; - - /* Legacy or Advanced Host Interface: - * 0: Advanced Host Interface - * 1: Legacy Host Interface - */ - tlan_ctx->legacy_int = ICE_TX_LEGACY; -} - -/** - * ice_vsi_cfg_txqs - Configure the VSI for Tx - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - * Configure the Tx VSI for operation. - */ -static int ice_vsi_cfg_txqs(struct ice_vsi *vsi) -{ - struct ice_aqc_add_tx_qgrp *qg_buf; - struct ice_aqc_add_txqs_perq *txq; - struct ice_pf *pf = vsi->back; - enum ice_status status; - u16 buf_len, i, pf_q; - int err = 0, tc = 0; - u8 num_q_grps; - - buf_len = sizeof(struct ice_aqc_add_tx_qgrp); - qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); - if (!qg_buf) - return -ENOMEM; - - if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { - err = -EINVAL; - goto err_cfg_txqs; - } - qg_buf->num_txqs = 1; - num_q_grps = 1; - - /* set up and configure the tx queues */ - ice_for_each_txq(vsi, i) { - struct ice_tlan_ctx tlan_ctx = { 0 }; - - pf_q = vsi->txq_map[i]; - ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); - /* copy context contents into the qg_buf */ - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); - - /* init queue specific tail reg. It is referred as transmit - * comm scheduler queue doorbell. - */ - vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); - status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc, - num_q_grps, qg_buf, buf_len, NULL); - if (status) { - dev_err(&vsi->back->pdev->dev, - "Failed to set LAN Tx queue context, error: %d\n", - status); - err = -ENODEV; - goto err_cfg_txqs; - } - - /* Add Tx Queue TEID into the VSI tx ring from the response - * This will complete configuring and enabling the queue. - */ - txq = &qg_buf->txqs[0]; - if (pf_q == le16_to_cpu(txq->txq_id)) - vsi->tx_rings[i]->txq_teid = - le32_to_cpu(txq->q_teid); - } -err_cfg_txqs: - devm_kfree(&pf->pdev->dev, qg_buf); - return err; -} - -/** - * ice_setup_rx_ctx - Configure a receive ring context - * @ring: The Rx ring to configure - * - * Configure the Rx descriptor ring in RLAN context. 
- */ -static int ice_setup_rx_ctx(struct ice_ring *ring) -{ - struct ice_vsi *vsi = ring->vsi; - struct ice_hw *hw = &vsi->back->hw; - u32 rxdid = ICE_RXDID_FLEX_NIC; - struct ice_rlan_ctx rlan_ctx; - u32 regval; - u16 pf_q; - int err; - - /* what is RX queue number in global space of 2K rx queues */ - pf_q = vsi->rxq_map[ring->q_index]; - - /* clear the context structure first */ - memset(&rlan_ctx, 0, sizeof(rlan_ctx)); - - rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S; - - rlan_ctx.qlen = ring->count; - - /* Receive Packet Data Buffer Size. - * The Packet Data Buffer Size is defined in 128 byte units. - */ - rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; - - /* use 32 byte descriptors */ - rlan_ctx.dsize = 1; - - /* Strip the Ethernet CRC bytes before the packet is posted to host - * memory. - */ - rlan_ctx.crcstrip = 1; - - /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ - rlan_ctx.l2tsel = 1; - - rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; - rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; - rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; - - /* This controls whether VLAN is stripped from inner headers - * The VLAN in the inner L2 header is stripped to the receive - * descriptor if enabled by this flag. - */ - rlan_ctx.showiv = 0; - - /* Max packet size for this queue - must not be set to a larger value - * than 5 x DBUF - */ - rlan_ctx.rxmax = min_t(u16, vsi->max_frame, - ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); - - /* Rx queue threshold in units of 64 */ - rlan_ctx.lrxqthresh = 1; - - /* Enable Flexible Descriptors in the queue context which - * allows this driver to select a specific receive descriptor format - */ - regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); - regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & - QRXFLXP_CNTXT_RXDID_IDX_M; - - /* increasing context priority to pick up profile id; - * default is 0x01; setting to 0x03 to ensure profile - * is programming if prev context is of same priority - */ - regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & - QRXFLXP_CNTXT_RXDID_PRIO_M; - - wr32(hw, QRXFLXP_CNTXT(pf_q), regval); - - /* Absolute queue number out of 2K needs to be passed */ - err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); - if (err) { - dev_err(&vsi->back->pdev->dev, - "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", - pf_q, err); - return -EIO; - } - - /* init queue specific tail register */ - ring->tail = hw->hw_addr + QRX_TAIL(pf_q); - writel(0, ring->tail); - ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); - - return 0; -} - -/** - * ice_vsi_cfg_rxqs - Configure the VSI for Rx - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - * Configure the Rx VSI for operation. 
- */ -static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) -{ - int err = 0; - u16 i; - - if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) - vsi->max_frame = vsi->netdev->mtu + - ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - else - vsi->max_frame = ICE_RXBUF_2048; - - vsi->rx_buf_len = ICE_RXBUF_2048; - /* set up individual rings */ - for (i = 0; i < vsi->num_rxq && !err; i++) - err = ice_setup_rx_ctx(vsi->rx_rings[i]); - - if (err) { - dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); - return -EIO; - } - return err; -} - -/** * ice_vsi_cfg - Setup the VSI * @vsi: the VSI being configured * @@ -4129,200 +2621,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) } /** - * ice_vsi_stop_tx_rings - Disable Tx rings - * @vsi: the VSI being configured - */ -static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - enum ice_status status; - u32 *q_teids, val; - u16 *q_ids, i; - int err = 0; - - if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) - return -EINVAL; - - q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), - GFP_KERNEL); - if (!q_teids) - return -ENOMEM; - - q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), - GFP_KERNEL); - if (!q_ids) { - err = -ENOMEM; - goto err_alloc_q_ids; - } - - /* set up the tx queue list to be disabled */ - ice_for_each_txq(vsi, i) { - u16 v_idx; - - if (!vsi->tx_rings || !vsi->tx_rings[i]) { - err = -EINVAL; - goto err_out; - } - - q_ids[i] = vsi->txq_map[i]; - q_teids[i] = vsi->tx_rings[i]->txq_teid; - - /* clear cause_ena bit for disabled queues */ - val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); - - /* software is expected to wait for 100 ns */ - ndelay(100); - - /* trigger a software interrupt for the vector associated to - * the queue to schedule napi handler - */ - v_idx = vsi->tx_rings[i]->q_vector->v_idx; - wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx), - GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); - } - status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, - NULL); - if (status) { - dev_err(&pf->pdev->dev, - "Failed to disable LAN Tx queues, error: %d\n", - status); - err = -ENODEV; - } - -err_out: - devm_kfree(&pf->pdev->dev, q_ids); - -err_alloc_q_ids: - devm_kfree(&pf->pdev->dev, q_teids); - - return err; -} - -/** - * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled - * @pf: the PF being configured - * @pf_q: the PF queue - * @ena: enable or disable state of the queue - * - * This routine will wait for the given Rx queue of the PF to reach the - * enabled or disabled state. - * Returns -ETIMEDOUT in case of failing to reach the requested state after - * multiple retries; else will return 0 in case of success. 
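[Editor's note] ice_pf_rxq_wait(), whose kernel-doc ends just above and whose removed body follows, polls the queue-enable status bit a bounded number of times and gives up with -ETIMEDOUT. The same retry-with-timeout shape, compressed into a user-space sketch that polls a fake register; the names and limits are illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define Q_WAIT_RETRY_LIMIT	10
#define QENA_STAT_BIT		(1u << 2)

/* Fake register read: reports "enabled" from the third poll onward. */
static unsigned int read_qrx_ctrl(void)
{
	static int polls;

	return ++polls >= 3 ? QENA_STAT_BIT : 0;
}

static int rxq_wait(bool ena)
{
	for (int i = 0; i < Q_WAIT_RETRY_LIMIT; i++) {
		if (ena == !!(read_qrx_ctrl() & QENA_STAT_BIT))
			return 0;
		usleep(10);	/* stands in for usleep_range(10, 20) */
	}
	return -1;		/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("rxq_wait(enable) = %d\n", rxq_wait(true));
	return 0;
}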
- */ -static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) -{ - int i; - - for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { - u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); - - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - break; - - usleep_range(10, 20); - } - if (i >= ICE_Q_WAIT_RETRY_LIMIT) - return -ETIMEDOUT; - - return 0; -} - -/** - * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings - * @vsi: the VSI being configured - * @ena: start or stop the rx rings - */ -static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) -{ - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - int i, j, ret = 0; - - for (i = 0; i < vsi->num_rxq; i++) { - int pf_q = vsi->rxq_map[i]; - u32 rx_reg; - - for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { - rx_reg = rd32(hw, QRX_CTRL(pf_q)); - if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == - ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) - break; - usleep_range(1000, 2000); - } - - /* Skip if the queue is already in the requested state */ - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - continue; - - /* turn on/off the queue */ - if (ena) - rx_reg |= QRX_CTRL_QENA_REQ_M; - else - rx_reg &= ~QRX_CTRL_QENA_REQ_M; - wr32(hw, QRX_CTRL(pf_q), rx_reg); - - /* wait for the change to finish */ - ret = ice_pf_rxq_wait(pf, pf_q, ena); - if (ret) { - dev_err(&pf->pdev->dev, - "VSI idx %d Rx ring %d %sable timeout\n", - vsi->idx, pf_q, (ena ? "en" : "dis")); - break; - } - } - - return ret; -} - -/** - * ice_vsi_start_rx_rings - start VSI's rx rings - * @vsi: the VSI whose rings are to be started - * - * Returns 0 on success and a negative value on error - */ -static int ice_vsi_start_rx_rings(struct ice_vsi *vsi) -{ - return ice_vsi_ctrl_rx_rings(vsi, true); -} - -/** - * ice_vsi_stop_rx_rings - stop VSI's rx rings - * @vsi: the VSI - * - * Returns 0 on success and a negative value on error - */ -static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) -{ - return ice_vsi_ctrl_rx_rings(vsi, false); -} - -/** - * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings - * @vsi: the VSI - * Returns 0 on success and a negative value on error - */ -static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi) -{ - int err_tx, err_rx; - - err_tx = ice_vsi_stop_tx_rings(vsi); - if (err_tx) - dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n"); - - err_rx = ice_vsi_stop_rx_rings(vsi); - if (err_rx) - dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n"); - - if (err_tx || err_rx) - return -EIO; - - return 0; -} - -/** * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured */ @@ -4419,122 +2717,6 @@ static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, } /** - * ice_stat_update40 - read 40 bit stat from the chip and update stat values - * @hw: ptr to the hardware info - * @hireg: high 32 bit HW register to read from - * @loreg: low 32 bit HW register to read from - * @prev_stat_loaded: bool to specify if previous stats are loaded - * @prev_stat: ptr to previous loaded stat value - * @cur_stat: ptr to current stat value - */ -static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, - u64 *cur_stat) -{ - u64 new_data; - - new_data = rd32(hw, loreg); - new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; - - /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. 
So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. - */ - if (!prev_stat_loaded) - *prev_stat = new_data; - if (likely(new_data >= *prev_stat)) - *cur_stat = new_data - *prev_stat; - else - /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; - *cur_stat &= 0xFFFFFFFFFFULL; -} - -/** - * ice_stat_update32 - read 32 bit stat from the chip and update stat values - * @hw: ptr to the hardware info - * @reg: HW register to read from - * @prev_stat_loaded: bool to specify if previous stats are loaded - * @prev_stat: ptr to previous loaded stat value - * @cur_stat: ptr to current stat value - */ -static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, - u64 *prev_stat, u64 *cur_stat) -{ - u32 new_data; - - new_data = rd32(hw, reg); - - /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. - */ - if (!prev_stat_loaded) - *prev_stat = new_data; - if (likely(new_data >= *prev_stat)) - *cur_stat = new_data - *prev_stat; - else - /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; -} - -/** - * ice_update_eth_stats - Update VSI-specific ethernet statistics counters - * @vsi: the VSI to be updated - */ -static void ice_update_eth_stats(struct ice_vsi *vsi) -{ - struct ice_eth_stats *prev_es, *cur_es; - struct ice_hw *hw = &vsi->back->hw; - u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ - - prev_es = &vsi->eth_stats_prev; - cur_es = &vsi->eth_stats; - - ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_bytes, - &cur_es->rx_bytes); - - ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_unicast, - &cur_es->rx_unicast); - - ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_multicast, - &cur_es->rx_multicast); - - ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_broadcast, - &cur_es->rx_broadcast); - - ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, - &prev_es->rx_discards, &cur_es->rx_discards); - - ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_bytes, - &cur_es->tx_bytes); - - ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_unicast, - &cur_es->tx_unicast); - - ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_multicast, - &cur_es->tx_multicast); - - ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_broadcast, - &cur_es->tx_broadcast); - - ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, - &prev_es->tx_errors, &cur_es->tx_errors); - - vsi->stat_offsets_loaded = true; -} - -/** * ice_update_vsi_ring_stats - Update VSI stats counters * @vsi: the VSI to be updated */ @@ -4827,7 +3009,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) */ int ice_down(struct ice_vsi *vsi) { - int i, err; + int i, tx_err, rx_err; /* Caller of this function is expected to set the * vsi->state __ICE_DOWN bit @@ -4838,7 +3020,18 @@ int ice_down(struct ice_vsi *vsi) } 
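
The ice_stat_update40()/ice_stat_update32() helpers in the hunk above handle hardware counters that are not reset at PFR: the first raw reading is saved as an offset, and a wraparound is compensated by adding back one full counter period before subtracting. A minimal userspace C sketch of that same offset-and-wraparound arithmetic follows; the function and variable names and the sample values are illustrative only, not the driver's code.

#include <stdint.h>
#include <stdio.h>

/* Width of the hardware counter being sampled (40 bits in the hunk above). */
#define STAT_WIDTH_BITS 40ULL
#define STAT_MASK ((1ULL << STAT_WIDTH_BITS) - 1)

/*
 * Return the amount accumulated since the first sample.  prev holds the first
 * raw reading (the saved "offset"); new_data is the latest raw reading.  If
 * the counter wrapped, add one full counter period before subtracting.
 */
static uint64_t stat_delta(uint64_t prev, uint64_t new_data)
{
	uint64_t delta;

	if (new_data >= prev)
		delta = new_data - prev;
	else
		delta = (new_data + (1ULL << STAT_WIDTH_BITS)) - prev;

	return delta & STAT_MASK;
}

int main(void)
{
	/* Counter wrapped: raw value went from near the 40-bit limit back to 5. */
	uint64_t prev = STAT_MASK - 9;	/* first reading, saved as offset */
	uint64_t now = 5;		/* latest reading after wraparound */

	/* Prints "delta = 15". */
	printf("delta = %llu\n", (unsigned long long)stat_delta(prev, now));
	return 0;
}
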
ice_vsi_dis_irq(vsi); - err = ice_vsi_stop_tx_rx_rings(vsi); + tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0); + if (tx_err) + netdev_err(vsi->netdev, + "Failed stop Tx rings, VSI %d error %d\n", + vsi->vsi_num, tx_err); + + rx_err = ice_vsi_stop_rx_rings(vsi); + if (rx_err) + netdev_err(vsi->netdev, + "Failed stop Rx rings, VSI %d error %d\n", + vsi->vsi_num, rx_err); + ice_napi_disable_all(vsi); ice_for_each_txq(vsi, i) @@ -4847,10 +3040,14 @@ int ice_down(struct ice_vsi *vsi) ice_for_each_rxq(vsi, i) ice_clean_rx_ring(vsi->rx_rings[i]); - if (err) - netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", + if (tx_err || rx_err) { + netdev_err(vsi->netdev, + "Failed to close VSI 0x%04X on switch 0x%04X\n", vsi->vsi_num, vsi->vsw->sw_id); - return err; + return -EIO; + } + + return 0; } /** @@ -4870,6 +3067,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) } ice_for_each_txq(vsi, i) { + vsi->tx_rings[i]->netdev = vsi->netdev; err = ice_setup_tx_ring(vsi->tx_rings[i]); if (err) break; @@ -4895,6 +3093,7 @@ static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } ice_for_each_rxq(vsi, i) { + vsi->rx_rings[i]->netdev = vsi->netdev; err = ice_setup_rx_ring(vsi->rx_rings[i]); if (err) break; @@ -4922,38 +3121,6 @@ static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) } /** - * ice_vsi_free_tx_rings - Free Tx resources for VSI queues - * @vsi: the VSI having resources freed - */ -static void ice_vsi_free_tx_rings(struct ice_vsi *vsi) -{ - int i; - - if (!vsi->tx_rings) - return; - - ice_for_each_txq(vsi, i) - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) - ice_free_tx_ring(vsi->tx_rings[i]); -} - -/** - * ice_vsi_free_rx_rings - Free Rx resources for VSI queues - * @vsi: the VSI having resources freed - */ -static void ice_vsi_free_rx_rings(struct ice_vsi *vsi) -{ - int i; - - if (!vsi->rx_rings) - return; - - ice_for_each_rxq(vsi, i) - if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) - ice_free_rx_ring(vsi->rx_rings[i]); -} - -/** * ice_vsi_open - Called when a network interface is made active * @vsi: the VSI to open * @@ -5014,78 +3181,26 @@ err_setup_tx: } /** - * ice_vsi_close - Shut down a VSI - * @vsi: the VSI being shut down - */ -static void ice_vsi_close(struct ice_vsi *vsi) -{ - if (!test_and_set_bit(__ICE_DOWN, vsi->state)) - ice_down(vsi); - - ice_vsi_free_irq(vsi); - ice_vsi_free_tx_rings(vsi); - ice_vsi_free_rx_rings(vsi); -} - -/** - * ice_rss_clean - Delete RSS related VSI structures that hold user inputs - * @vsi: the VSI being removed + * ice_vsi_release_all - Delete all VSIs + * @pf: PF from which all VSIs are being removed */ -static void ice_rss_clean(struct ice_vsi *vsi) +static void ice_vsi_release_all(struct ice_pf *pf) { - struct ice_pf *pf; + int err, i; - pf = vsi->back; - - if (vsi->rss_hkey_user) - devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); - if (vsi->rss_lut_user) - devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); -} - -/** - * ice_vsi_release - Delete a VSI and free its resources - * @vsi: the VSI being removed - * - * Returns 0 on success or < 0 on error - */ -static int ice_vsi_release(struct ice_vsi *vsi) -{ - struct ice_pf *pf; + if (!pf->vsi) + return; - if (!vsi->back) - return -ENODEV; - pf = vsi->back; + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; - if (vsi->netdev) { - unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; + err = ice_vsi_release(pf->vsi[i]); + if (err) + dev_dbg(&pf->pdev->dev, + "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 
+ i, err, pf->vsi[i]->vsi_num); } - - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_rss_clean(vsi); - - /* Disable VSI and free resources */ - ice_vsi_dis_irq(vsi); - ice_vsi_close(vsi); - - /* reclaim interrupt vectors back to PF */ - ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); - pf->num_avail_msix += vsi->num_q_vectors; - - ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num); - ice_vsi_delete(vsi); - ice_vsi_free_q_vectors(vsi); - ice_vsi_clear_rings(vsi); - - ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; - - ice_vsi_clear(vsi); - - return 0; } /** @@ -5099,28 +3214,37 @@ static void ice_dis_vsi(struct ice_vsi *vsi) set_bit(__ICE_NEEDS_RESTART, vsi->state); - if (vsi->netdev && netif_running(vsi->netdev) && - vsi->type == ICE_VSI_PF) - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - - ice_vsi_close(vsi); + if (vsi->type == ICE_VSI_PF && vsi->netdev) { + if (netif_running(vsi->netdev)) { + rtnl_lock(); + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + rtnl_unlock(); + } else { + ice_vsi_close(vsi); + } + } } /** * ice_ena_vsi - resume a VSI * @vsi: the VSI being resume */ -static void ice_ena_vsi(struct ice_vsi *vsi) +static int ice_ena_vsi(struct ice_vsi *vsi) { - if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) - return; + int err = 0; + + if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) && + vsi->netdev) { + if (netif_running(vsi->netdev)) { + rtnl_lock(); + err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); + rtnl_unlock(); + } else { + err = ice_vsi_open(vsi); + } + } - if (vsi->netdev && netif_running(vsi->netdev)) - vsi->netdev->netdev_ops->ndo_open(vsi->netdev); - else if (ice_vsi_open(vsi)) - /* this clears the DOWN bit */ - dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n", - vsi->vsi_num, vsi->vsw->sw_id); + return err; } /** @@ -5140,13 +3264,89 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf) * ice_pf_ena_all_vsi - Resume all VSIs on a PF * @pf: the PF */ -static void ice_pf_ena_all_vsi(struct ice_pf *pf) +static int ice_pf_ena_all_vsi(struct ice_pf *pf) { int v; ice_for_each_vsi(pf, v) if (pf->vsi[v]) - ice_ena_vsi(pf->vsi[v]); + if (ice_ena_vsi(pf->vsi[v])) + return -EIO; + + return 0; +} + +/** + * ice_vsi_rebuild_all - rebuild all VSIs in pf + * @pf: the PF + */ +static int ice_vsi_rebuild_all(struct ice_pf *pf) +{ + int i; + + /* loop through pf->vsi array and reinit the VSI if found */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + int err; + + if (!pf->vsi[i]) + continue; + + /* VF VSI rebuild isn't supported yet */ + if (pf->vsi[i]->type == ICE_VSI_VF) + continue; + + err = ice_vsi_rebuild(pf->vsi[i]); + if (err) { + dev_err(&pf->pdev->dev, + "VSI at index %d rebuild failed\n", + pf->vsi[i]->idx); + return err; + } + + dev_info(&pf->pdev->dev, + "VSI at index %d rebuilt. 
vsi_num = 0x%x\n", + pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + } + + return 0; +} + +/** + * ice_vsi_replay_all - replay all VSIs configuration in the PF + * @pf: the PF + */ +static int ice_vsi_replay_all(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + enum ice_status ret; + int i; + + /* loop through pf->vsi array and replay the VSI if found */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; + + ret = ice_replay_vsi(hw, pf->vsi[i]->idx); + if (ret) { + dev_err(&pf->pdev->dev, + "VSI at index %d replay failed %d\n", + pf->vsi[i]->idx, ret); + return -EIO; + } + + /* Re-map HW VSI number, using VSI handle that has been + * previously validated in ice_replay_vsi() call above + */ + pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx); + + dev_info(&pf->pdev->dev, + "VSI at index %d filter replayed successfully - vsi_num %i\n", + pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + } + + /* Clean up replay filter after successful re-configuration */ + ice_replay_post(hw); + return 0; } /** @@ -5168,13 +3368,13 @@ static void ice_rebuild(struct ice_pf *pf) ret = ice_init_all_ctrlq(hw); if (ret) { dev_err(dev, "control queues init failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; } ret = ice_clear_pf_cfg(hw); if (ret) { dev_err(dev, "clear PF configuration failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; } ice_clear_pxe_mode(hw); @@ -5182,14 +3382,34 @@ static void ice_rebuild(struct ice_pf *pf) ret = ice_get_caps(hw); if (ret) { dev_err(dev, "ice_get_caps failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; } - /* basic nic switch setup */ - err = ice_setup_pf_sw(pf); + err = ice_sched_init_port(hw->port_info); + if (err) + goto err_sched_init_port; + + /* reset search_hint of irq_trackers to 0 since interrupts are + * reclaimed and could be allocated from beginning during VSI rebuild + */ + pf->sw_irq_tracker->search_hint = 0; + pf->hw_irq_tracker->search_hint = 0; + + err = ice_vsi_rebuild_all(pf); if (err) { - dev_err(dev, "ice_setup_pf_sw failed\n"); - goto fail_reset; + dev_err(dev, "ice_vsi_rebuild_all failed\n"); + goto err_vsi_rebuild; + } + + err = ice_update_link_info(hw->port_info); + if (err) + dev_err(&pf->pdev->dev, "Get link status error %d\n", err); + + /* Replay all VSIs Configuration, including filters after reset */ + if (ice_vsi_replay_all(pf)) { + dev_err(&pf->pdev->dev, + "error replaying VSI configurations with switch filter rules\n"); + goto err_vsi_rebuild; } /* start misc vector */ @@ -5197,20 +3417,36 @@ static void ice_rebuild(struct ice_pf *pf) err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "misc vector setup failed: %d\n", err); - goto fail_reset; + goto err_vsi_rebuild; } } /* restart the VSIs that were rebuilt and running before the reset */ - ice_pf_ena_all_vsi(pf); + err = ice_pf_ena_all_vsi(pf); + if (err) { + dev_err(&pf->pdev->dev, "error enabling VSIs\n"); + /* no need to disable VSIs in tear down path in ice_rebuild() + * since its already taken care in ice_vsi_open() + */ + goto err_vsi_rebuild; + } + ice_reset_all_vfs(pf, true); + /* if we get here, reset flow is successful */ + clear_bit(__ICE_RESET_FAILED, pf->state); return; -fail_reset: +err_vsi_rebuild: + ice_vsi_release_all(pf); +err_sched_init_port: + ice_sched_cleanup_all(hw); +err_init_ctrlq: ice_shutdown_all_ctrlq(hw); set_bit(__ICE_RESET_FAILED, pf->state); clear_recovery: - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* set this bit in PF state to control service task scheduling */ + 
set_bit(__ICE_NEEDS_RESTART, pf->state); + dev_err(dev, "Rebuild failed, unload and reload driver\n"); } /** @@ -5243,7 +3479,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) } /* if a reset is in progress, wait for some time for it to complete */ do { - if (ice_is_reset_recovery_pending(pf->state)) { + if (ice_is_reset_in_progress(pf->state)) { count++; usleep_range(1000, 2000); } else { @@ -5299,7 +3535,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) struct ice_aqc_get_set_rss_keys *buf = (struct ice_aqc_get_set_rss_keys *)seed; - status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf); + status = ice_aq_set_rss_key(hw, vsi->idx, buf); if (status) { dev_err(&pf->pdev->dev, @@ -5310,8 +3546,8 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) } if (lut) { - status = ice_aq_set_rss_lut(hw, vsi->vsi_num, - vsi->rss_lut_type, lut, lut_size); + status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, + lut, lut_size); if (status) { dev_err(&pf->pdev->dev, "Cannot set RSS lut, err %d aq_err %d\n", @@ -5342,7 +3578,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) struct ice_aqc_get_set_rss_keys *buf = (struct ice_aqc_get_set_rss_keys *)seed; - status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf); + status = ice_aq_get_rss_key(hw, vsi->idx, buf); if (status) { dev_err(&pf->pdev->dev, "Cannot get RSS key, err %d aq_err %d\n", @@ -5352,8 +3588,8 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) } if (lut) { - status = ice_aq_get_rss_lut(hw, vsi->vsi_num, - vsi->rss_lut_type, lut, lut_size); + status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, + lut, lut_size); if (status) { dev_err(&pf->pdev->dev, "Cannot get RSS lut, err %d aq_err %d\n", @@ -5366,6 +3602,232 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) } /** + * ice_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq + * @dev: the netdev being configured + * @filter_mask: filter mask passed in + * @nlflags: netlink flags passed in + * + * Return the bridge mode (VEB/VEPA) + */ +static int +ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, int nlflags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u16 bmode; + + bmode = pf->first_sw->bridge_mode; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, + filter_mask, NULL); +} + +/** + * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) + * @vsi: Pointer to VSI structure + * @bmode: Hardware bridge mode (VEB/VEPA) + * + * Returns 0 on success, negative on failure + */ +static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_aqc_vsi_props *vsi_props; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + vsi_props = &vsi->info; + ctxt.info = vsi->info; + + if (bmode == BRIDGE_MODE_VEB) + /* change from VEPA to VEB mode */ + ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + else + /* change from VEB to VEPA mode */ + ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); + + status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (status) { + dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", + 
bmode, status, hw->adminq.sq_last_status); + return -EIO; + } + /* Update sw flags for book keeping */ + vsi_props->sw_flags = ctxt.info.sw_flags; + + return 0; +} + +/** + * ice_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * @flags: bridge setlink flags + * + * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is + * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if + * not already set for all VSIs connected to this switch. And also update the + * unicast switch filter rules for the corresponding switch of the netdev. + */ +static int +ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 __always_unused flags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_pf *pf = np->vsi->back; + struct nlattr *attr, *br_spec; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + struct ice_sw *pf_sw; + int rem, v, err = 0; + + pf_sw = pf->first_sw; + /* find the attribute in the netlink message */ + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + mode = nla_get_u16(attr); + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) + return -EINVAL; + /* Continue if bridge mode is not being flipped */ + if (mode == pf_sw->bridge_mode) + continue; + /* Iterates through the PF VSI list and update the loopback + * mode of the VSI + */ + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); + if (err) + return err; + } + + hw->evb_veb = (mode == BRIDGE_MODE_VEB); + /* Update the unicast switch filter rules for the corresponding + * switch of the netdev + */ + status = ice_update_sw_rule_bridge_mode(hw); + if (status) { + netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n", + mode, status, hw->adminq.sq_last_status); + /* revert hw->evb_veb */ + hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); + return -EIO; + } + + pf_sw->bridge_mode = mode; + } + + return 0; +} + +/** + * ice_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void ice_tx_timeout(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_ring *tx_ring = NULL; + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u32 head, val = 0, i; + int hung_queue = -1; + + pf->tx_timeout_count++; + + /* find the stopped queue the same way the stack does */ + for (i = 0; i < netdev->num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(netdev, i); + trans_start = q->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + netdev->watchdog_timeo))) { + hung_queue = i; + break; + } + } + + if (i == netdev->num_tx_queues) { + netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); + } else { + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_txq; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (hung_queue == + vsi->tx_rings[i]->q_index) { + tx_ring = vsi->tx_rings[i]; + break; + } + } + } + } + + /* Reset recovery level if enough time has elapsed after last timeout. + * Also ensure no new reset action happens before next timeout period. 
+ */ + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) + pf->tx_timeout_recovery_level = 1; + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + + netdev->watchdog_timeo))) + return; + + if (tx_ring) { + head = tx_ring->next_to_clean; + /* Read interrupt register */ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + val = rd32(&pf->hw, + GLINT_DYN_CTL(tx_ring->q_vector->v_idx + + tx_ring->vsi->hw_base_vector)); + + netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", + vsi->vsi_num, hung_queue, tx_ring->next_to_clean, + head, tx_ring->next_to_use, + readl(tx_ring->tail), val); + } + + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); + + switch (pf->tx_timeout_recovery_level) { + case 1: + set_bit(__ICE_PFR_REQ, pf->state); + break; + case 2: + set_bit(__ICE_CORER_REQ, pf->state); + break; + case 3: + set_bit(__ICE_GLOBR_REQ, pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); + set_bit(__ICE_DOWN, pf->state); + set_bit(__ICE_NEEDS_RESTART, vsi->state); + set_bit(__ICE_SERVICE_DIS, pf->state); + break; + } + + ice_service_task_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +/** * ice_open - Called when a network interface becomes active * @netdev: network interface device structure * @@ -5383,6 +3845,11 @@ static int ice_open(struct net_device *netdev) struct ice_vsi *vsi = np->vsi; int err; + if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { + netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); + return -EIO; + } + netif_carrier_off(netdev); err = ice_vsi_open(vsi); @@ -5473,9 +3940,18 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, + .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, + .ndo_set_vf_mac = ice_set_vf_mac, + .ndo_get_vf_config = ice_get_vf_cfg, + .ndo_set_vf_trust = ice_set_vf_trust, + .ndo_set_vf_vlan = ice_set_vf_port_vlan, + .ndo_set_vf_link_state = ice_set_vf_link_state, .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, .ndo_set_features = ice_set_features, + .ndo_bridge_getlink = ice_bridge_getlink, + .ndo_bridge_setlink = ice_bridge_setlink, .ndo_fdb_add = ice_fdb_add, .ndo_fdb_del = ice_fdb_del, + .ndo_tx_timeout = ice_tx_timeout, }; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 295a8cd87fc1..3274c543283c 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -137,7 +137,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) if (hw->nvm.blank_nvm_mode) return 0; - return ice_acquire_res(hw, ICE_NVM_RES_ID, access); + return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index eeae199469b6..7cc8aa18a22b 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -17,7 +17,6 @@ ice_sched_add_root_node(struct ice_port_info *pi, { struct ice_sched_node *root; struct ice_hw *hw; - u16 max_children; if (!pi) return ICE_ERR_PARAM; @@ -28,8 +27,8 @@ ice_sched_add_root_node(struct ice_port_info *pi, if (!root) return ICE_ERR_NO_MEMORY; - 
max_children = le16_to_cpu(hw->layer_info[0].max_children); - root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, + /* coverity[suspicious_sizeof] */ + root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], sizeof(*root), GFP_KERNEL); if (!root->children) { devm_kfree(ice_hw_to_dev(hw), root); @@ -86,6 +85,62 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) } /** + * ice_aq_query_sched_elems - query scheduler elements + * @hw: pointer to the hw struct + * @elems_req: number of elements to query + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements returned + * @cd: pointer to command details structure or NULL + * + * Query scheduling elements (0x0404) + */ +static enum ice_status +ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_get_elem *buf, u16 buf_size, + u16 *elems_ret, struct ice_sq_cd *cd) +{ + struct ice_aqc_get_cfg_elem *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_update_elem; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems); + cmd->num_elem_req = cpu_to_le16(elems_req); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && elems_ret) + *elems_ret = le16_to_cpu(cmd->num_elem_resp); + + return status; +} + +/** + * ice_sched_query_elem - query element information from hw + * @hw: pointer to the hw struct + * @node_teid: node teid to be queried + * @buf: buffer to element information + * + * This function queries HW element information + */ +static enum ice_status +ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, + struct ice_aqc_get_elem *buf) +{ + u16 buf_size, num_elem_ret = 0; + enum ice_status status; + + buf_size = sizeof(*buf); + memset(buf, 0, buf_size); + buf->generic[0].node_teid = cpu_to_le32(node_teid); + status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, + NULL); + if (status || num_elem_ret != 1) + ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); + return status; +} + +/** * ice_sched_add_node - Insert the Tx scheduler node in SW DB * @pi: port information structure * @layer: Scheduler layer of the node @@ -98,9 +153,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info) { struct ice_sched_node *parent; + struct ice_aqc_get_elem elem; struct ice_sched_node *node; + enum ice_status status; struct ice_hw *hw; - u16 max_children; if (!pi) return ICE_ERR_PARAM; @@ -117,12 +173,20 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, return ICE_ERR_PARAM; } + /* query the current node information from FW before additing it + * to the SW DB + */ + status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); + if (status) + return status; + node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); if (!node) return ICE_ERR_NO_MEMORY; - max_children = le16_to_cpu(hw->layer_info[layer].max_children); - if (max_children) { - node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, + if (hw->max_children[layer]) { + /* coverity[suspicious_sizeof] */ + node->children = devm_kcalloc(ice_hw_to_dev(hw), + hw->max_children[layer], sizeof(*node), GFP_KERNEL); if (!node->children) { devm_kfree(ice_hw_to_dev(hw), node); @@ -134,7 +198,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, node->parent = parent; node->tx_sched_layer = layer; parent->children[parent->num_children++] = node; - memcpy(&node->info, info, 
sizeof(*info)); + memcpy(&node->info, &elem.generic[0], sizeof(node->info)); return 0; } @@ -192,14 +256,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; + buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); for (i = 0; i < num_nodes; i++) buf->teid[i] = cpu_to_le32(node_teids[i]); + status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, &num_groups_removed, NULL); if (status || num_groups_removed != 1) ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n"); + devm_kfree(ice_hw_to_dev(hw), buf); return status; } @@ -532,9 +599,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, static void ice_sched_clear_tx_topo(struct ice_port_info *pi) { struct ice_sched_agg_info *agg_info; - struct ice_sched_vsi_info *vsi_elem; struct ice_sched_agg_info *atmp; - struct ice_sched_vsi_info *tmp; struct ice_hw *hw; if (!pi) @@ -553,13 +618,6 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi) } } - /* remove the vsi list */ - list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list, - list_entry) { - list_del(&vsi_elem->list_entry); - devm_kfree(ice_hw_to_dev(hw), vsi_elem); - } - if (pi->root) { ice_free_sched_node(pi, pi->root); pi->root = NULL; @@ -592,13 +650,16 @@ static void ice_sched_clear_port(struct ice_port_info *pi) */ void ice_sched_cleanup_all(struct ice_hw *hw) { - if (!hw || !hw->port_info) + if (!hw) return; - if (hw->layer_info) + if (hw->layer_info) { devm_kfree(ice_hw_to_dev(hw), hw->layer_info); + hw->layer_info = NULL; + } - ice_sched_clear_port(hw->port_info); + if (hw->port_info) + ice_sched_clear_port(hw->port_info); hw->num_tx_sched_layers = 0; hw->num_tx_sched_phys_layers = 0; @@ -607,31 +668,6 @@ void ice_sched_cleanup_all(struct ice_hw *hw) } /** - * ice_sched_create_vsi_info_entry - create an empty new VSI entry - * @pi: port information structure - * @vsi_id: VSI Id - * - * This function creates a new VSI entry and adds it to list - */ -static struct ice_sched_vsi_info * -ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) -{ - struct ice_sched_vsi_info *vsi_elem; - - if (!pi) - return NULL; - - vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem), - GFP_KERNEL); - if (!vsi_elem) - return NULL; - - list_add(&vsi_elem->list_entry, &pi->vsi_info_list); - vsi_elem->vsi_id = vsi_id; - return vsi_elem; -} - -/** * ice_sched_add_elems - add nodes to hw and SW DB * @pi: port information structure * @tc_node: pointer to the branch node @@ -671,9 +707,13 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ICE_AQC_ELEM_VALID_EIR; buf->generic[i].data.generic = 0; buf->generic[i].data.cir_bw.bw_profile_idx = - ICE_SCHED_DFLT_RL_PROF_ID; + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->generic[i].data.cir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); buf->generic[i].data.eir_bw.bw_profile_idx = - ICE_SCHED_DFLT_RL_PROF_ID; + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->generic[i].data.eir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); } status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, @@ -697,7 +737,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, teid = le32_to_cpu(buf->generic[i].node_teid); new_node = ice_sched_find_node_by_teid(parent, teid); - if (!new_node) { ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid); @@ -710,7 +749,6 @@ 
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, /* add it to previous node sibling pointer */ /* Note: siblings are not linked across branches */ prev = ice_sched_get_first_node(hw, tc_node, layer); - if (prev && prev != new_node) { while (prev->sibling) prev = prev->sibling; @@ -760,8 +798,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, return ICE_ERR_PARAM; /* max children per node per layer */ - max_child_nodes = - le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); + max_child_nodes = hw->max_children[parent->tx_sched_layer]; /* current number of children + required nodes exceed max children ? */ if ((parent->num_children + num_nodes) > max_child_nodes) { @@ -851,78 +888,6 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) } /** - * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer - * @pi: pointer to the port info struct - * @layer: layer number - * - * This function calculates the number of nodes present in the scheduler tree - * including all the branches for a given layer - */ -static u16 -ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer) -{ - struct ice_hw *hw; - u16 num_nodes = 0; - u8 i; - - if (!pi) - return num_nodes; - - hw = pi->hw; - - /* Calculate the number of nodes for all TCs */ - for (i = 0; i < pi->root->num_children; i++) { - struct ice_sched_node *tc_node, *node; - - tc_node = pi->root->children[i]; - - /* Get the first node */ - node = ice_sched_get_first_node(hw, tc_node, layer); - if (!node) - continue; - - /* count the siblings */ - while (node) { - num_nodes++; - node = node->sibling; - } - } - - return num_nodes; -} - -/** - * ice_sched_val_max_nodes - check max number of nodes reached or not - * @pi: port information structure - * @new_num_nodes_per_layer: pointer to the new number of nodes array - * - * This function checks whether the scheduler tree layers have enough space to - * add new nodes - */ -static enum ice_status -ice_sched_validate_for_max_nodes(struct ice_port_info *pi, - u16 *new_num_nodes_per_layer) -{ - struct ice_hw *hw = pi->hw; - u8 i, qg_layer; - u16 num_nodes; - - qg_layer = ice_sched_get_qgrp_layer(hw); - - /* walk through all the layers from SW entry point to qgroup layer */ - for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) { - num_nodes = ice_sched_get_num_nodes_per_layer(pi, i); - if (num_nodes + new_num_nodes_per_layer[i] > - le16_to_cpu(hw->layer_info[i].max_pf_nodes)) { - ice_debug(hw, ICE_DBG_SCHED, - "max nodes reached for layer = %d\n", i); - return ICE_ERR_CFG; - } - } - return 0; -} - -/** * ice_rm_dflt_leaf_node - remove the default leaf node in the tree * @pi: port information structure * @@ -1003,14 +968,12 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) hw = pi->hw; /* Query the Default Topology from FW */ - buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES, - sizeof(*buf), GFP_KERNEL); + buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; /* Query default scheduling tree topology */ - status = ice_aq_get_dflt_topo(hw, pi->lport, buf, - sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES, + status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, &num_branches, NULL); if (status) goto err_init_port; @@ -1075,7 +1038,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) pi->port_state = ICE_SCHED_PORT_STATE_READY; mutex_init(&pi->sched_lock); INIT_LIST_HEAD(&pi->agg_list); - INIT_LIST_HEAD(&pi->vsi_info_list); 
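
The ice_sched_add_elems() hunk above wraps the default rate-limit profile index and bandwidth weight in cpu_to_le16() before the buffer is handed to firmware, and response fields are read back through le16_to_cpu()/le32_to_cpu(). A minimal userspace C sketch of that byte-order discipline, using the glibc <endian.h> helpers in place of the kernel macros; the struct layout and values below are made up purely for illustration.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Toy descriptor laid out the way a little-endian device expects it. */
struct toy_sched_elem {
	uint16_t bw_profile_idx;	/* little-endian on the wire */
	uint16_t bw_alloc;		/* little-endian on the wire */
};

int main(void)
{
	struct toy_sched_elem elem;

	/* Host-order constants must be converted before filling the buffer. */
	elem.bw_profile_idx = htole16(1);	/* analogous to cpu_to_le16() */
	elem.bw_alloc = htole16(4);

	/* Reading back, convert to host order before using the value. */
	printf("profile idx seen by host: %u\n", le16toh(elem.bw_profile_idx));
	printf("bw alloc seen by host: %u\n", le16toh(elem.bw_alloc));
	return 0;
}
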
err_init_port: if (status && pi->root) { @@ -1097,6 +1059,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) { struct ice_aqc_query_txsched_res_resp *buf; enum ice_status status = 0; + __le16 max_sibl; + u8 i; if (hw->layer_info) return status; @@ -1115,7 +1079,20 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) hw->flattened_layers = buf->sched_props.flattening_bitmap; hw->max_cgds = buf->sched_props.max_pf_cgds; - hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, + /* max sibling group size of current layer refers to the max children + * of the below layer node. + * layer 1 node max children will be layer 2 max sibling group size + * layer 2 node max children will be layer 3 max sibling group size + * and so on. This array will be populated from root (index 0) to + * qgroup layer 7. Leaf node has no children. + */ + for (i = 0; i < hw->num_tx_sched_layers; i++) { + max_sibl = buf->layer_props[i].max_sibl_grp_sz; + hw->max_children[i] = le16_to_cpu(max_sibl); + } + + hw->layer_info = (struct ice_aqc_layer_props *) + devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, (hw->num_tx_sched_layers * sizeof(*hw->layer_info)), GFP_KERNEL); @@ -1130,27 +1107,6 @@ sched_query_out: } /** - * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id - * @pi: port information structure - * @vsi_id: vsi id - * - * This function retrieves the vsi list for the given vsi id - */ -static struct ice_sched_vsi_info * -ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) -{ - struct ice_sched_vsi_info *list_elem; - - if (!pi) - return NULL; - - list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry) - if (list_elem->vsi_id == vsi_id) - return list_elem; - return NULL; -} - -/** * ice_sched_find_node_in_subtree - Find node in part of base node subtree * @hw: pointer to the hw struct * @base: pointer to the base node @@ -1186,30 +1142,28 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, /** * ice_sched_get_free_qparent - Get a free lan or rdma q group node * @pi: port information structure - * @vsi_id: vsi id + * @vsi_handle: software VSI handle * @tc: branch number * @owner: lan or rdma * * This function retrieves a free lan or rdma q group node */ struct ice_sched_node * -ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc, +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner) { struct ice_sched_node *vsi_node, *qgrp_node = NULL; - struct ice_sched_vsi_info *list_elem; + struct ice_vsi_ctx *vsi_ctx; u16 max_children; u8 qgrp_layer; qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); - max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children); - - list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id); - if (!list_elem) - goto lan_q_exit; - - vsi_node = list_elem->vsi_node[tc]; + max_children = pi->hw->max_children[qgrp_layer]; + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + return NULL; + vsi_node = vsi_ctx->sched.vsi_node[tc]; /* validate invalid VSI id */ if (!vsi_node) goto lan_q_exit; @@ -1233,14 +1187,14 @@ lan_q_exit: * ice_sched_get_vsi_node - Get a VSI node based on VSI id * @hw: pointer to the hw struct * @tc_node: pointer to the TC node - * @vsi_id: VSI id + * @vsi_handle: software VSI handle * * This function retrieves a VSI node for a given VSI id from a given * TC branch */ static struct ice_sched_node * ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, - u16 vsi_id) + u16 vsi_handle) { 
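
The loop above fills hw->max_children[] from each layer's max sibling group size, so later code can size the scheduler tree without dereferencing hw->layer_info. A short hypothetical C sketch of how such a per-layer fan-out table is consumed: rounding a queue count up through successive layers, the same DIV_ROUND_UP() walk that ice_sched_calc_vsi_child_nodes() performs against hw->max_children[] later in this patch. The layer count and numbers below are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define NUM_LAYERS 4	/* pretend tree: 0 = VSI layer ... 3 = queue-group layer */

/* Round-up division, equivalent to the kernel's DIV_ROUND_UP(). */
static uint16_t div_round_up(uint16_t n, uint16_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	/* max_children[i]: how many children a node at layer i may have. */
	uint16_t max_children[NUM_LAYERS] = { 8, 8, 4, 9 };
	uint16_t num_nodes[NUM_LAYERS] = { 0 };
	uint16_t num = 130;	/* queues to place under the queue-group layer */
	int i;

	/* Walk from the queue-group layer up toward the VSI layer. */
	for (i = NUM_LAYERS - 1; i > 0; i--) {
		num = div_round_up(num, max_children[i]);
		num_nodes[i] = num ? num : 1;
		printf("layer %d needs %u node(s)\n", i, num_nodes[i]);
	}
	return 0;
}
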
struct ice_sched_node *node; u8 vsi_layer; @@ -1250,7 +1204,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, /* Check whether it already exists */ while (node) { - if (node->vsi_id == vsi_id) + if (node->vsi_handle == vsi_handle) return node; node = node->sibling; } @@ -1278,10 +1232,8 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) /* calculate num nodes from q group to VSI layer */ for (i = qgl; i > vsil; i--) { - u16 max_children = le16_to_cpu(hw->layer_info[i].max_children); - /* round to the next integer if there is a remainder */ - num = DIV_ROUND_UP(num, max_children); + num = DIV_ROUND_UP(num, hw->max_children[i]); /* need at least one node */ num_nodes[i] = num ? num : 1; @@ -1291,7 +1243,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) /** * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree * @pi: port information structure - * @vsi_id: VSI id + * @vsi_handle: software VSI handle * @tc_node: pointer to the TC node * @num_nodes: pointer to the num nodes that needs to be added per layer * @owner: node owner (lan or rdma) @@ -1300,7 +1252,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) * lan and rdma separately. */ static enum ice_status -ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, +ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes, u8 owner) { @@ -1311,16 +1263,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u16 num_added = 0; u8 i, qgl, vsil; - status = ice_sched_validate_for_max_nodes(pi, num_nodes); - if (status) - return status; - qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); - parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { if (!parent) return ICE_ERR_CFG; + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, @@ -1398,8 +1347,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *node; - u16 max_child; - u8 i, vsil; + u8 vsil; + int i; vsil = ice_sched_get_vsi_layer(hw); for (i = vsil; i >= hw->sw_entry_point_layer; i--) @@ -1412,12 +1361,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, /* If intermediate nodes are reached max children * then add a new one. 
*/ - node = ice_sched_get_first_node(hw, tc_node, i); - max_child = le16_to_cpu(hw->layer_info[i].max_children); - + node = ice_sched_get_first_node(hw, tc_node, (u8)i); /* scan all the siblings */ while (node) { - if (node->num_children < max_child) + if (node->num_children < hw->max_children[i]) break; node = node->sibling; } @@ -1431,7 +1378,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, /** * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc_node: pointer to TC node * @num_nodes: pointer to num nodes array * @@ -1439,7 +1386,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, * VSI, its parent and intermediate nodes in below layers */ static enum ice_status -ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, +ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *parent = tc_node; @@ -1451,10 +1398,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, if (!pi) return ICE_ERR_PARAM; - status = ice_sched_validate_for_max_nodes(pi, num_nodes); - if (status) - return status; - vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, @@ -1477,21 +1420,22 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, return ICE_ERR_CFG; if (i == vsil) - parent->vsi_id = vsi_id; + parent->vsi_handle = vsi_handle; } + return 0; } /** * ice_sched_add_vsi_to_topo - add a new VSI into tree * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc: TC number * * This function adds a new VSI into scheduler tree */ static enum ice_status -ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc) +ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) { u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; struct ice_sched_node *tc_node; @@ -1505,13 +1449,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc) ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes); /* add vsi supported nodes to tc subtree */ - return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes); + return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, + num_nodes); } /** * ice_sched_update_vsi_child_nodes - update VSI child nodes * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc: TC number * @new_numqs: new number of max queues * @owner: owner of this subtree @@ -1519,14 +1464,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc) * This function updates the VSI child nodes based on the number of queues */ static enum ice_status -ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, - u16 new_numqs, u8 owner) +ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, + u8 tc, u16 new_numqs, u8 owner) { u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; - struct ice_sched_vsi_info *vsi; + struct ice_vsi_ctx *vsi_ctx; enum ice_status status = 0; struct ice_hw *hw = pi->hw; u16 prev_numqs; @@ -1536,16 +1481,16 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, if (!tc_node) return 
ICE_ERR_CFG; - vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); if (!vsi_node) return ICE_ERR_CFG; - vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); - if (!vsi) - return ICE_ERR_CFG; + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; if (owner == ICE_SCHED_NODE_OWNER_LAN) - prev_numqs = vsi->max_lanq[tc]; + prev_numqs = vsi_ctx->sched.max_lanq[tc]; else return ICE_ERR_PARAM; @@ -1570,13 +1515,13 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) new_num_nodes[i] -= prev_num_nodes[i]; - status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node, + status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, new_num_nodes, owner); if (status) return status; } - vsi->max_lanq[tc] = new_numqs; + vsi_ctx->sched.max_lanq[tc] = new_numqs; return status; } @@ -1584,7 +1529,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, /** * ice_sched_cfg_vsi - configure the new/exisiting VSI * @pi: port information structure - * @vsi_id: VSI Id + * @vsi_handle: software VSI handle * @tc: TC number * @maxqs: max number of queues * @owner: lan or rdma @@ -1595,25 +1540,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, * disabled then suspend the VSI if it is not already. */ enum ice_status -ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable) { struct ice_sched_node *vsi_node, *tc_node; - struct ice_sched_vsi_info *vsi; + struct ice_vsi_ctx *vsi_ctx; enum ice_status status = 0; struct ice_hw *hw = pi->hw; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_PARAM; - - vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); - if (!vsi) - vsi = ice_sched_create_vsi_info_entry(pi, vsi_id); - if (!vsi) - return ICE_ERR_NO_MEMORY; - - vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); /* suspend the VSI if tc is not enabled */ if (!enable) { @@ -1630,18 +1571,26 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, /* TC is enabled, if it is a new VSI then add it to the tree */ if (!vsi_node) { - status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc); + status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc); if (status) return status; - vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); + + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); if (!vsi_node) return ICE_ERR_CFG; - vsi->vsi_node[tc] = vsi_node; + + vsi_ctx->sched.vsi_node[tc] = vsi_node; vsi_node->in_use = true; + /* invalidate the max queues whenever VSI gets added first time + * into the scheduler tree (boot or after reset). We need to + * recreate the child nodes all the time in these cases. 
+ */ + vsi_ctx->sched.max_lanq[tc] = 0; } /* update the VSI child nodes */ - status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner); + status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs, + owner); if (status) return status; diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index badadcc120d3..5dc9cfa04c58 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -12,7 +12,6 @@ struct ice_sched_agg_vsi_info { struct list_head list_entry; DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS); - u16 vsi_id; }; struct ice_sched_agg_info { @@ -35,9 +34,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); struct ice_sched_node * -ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc, +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner); enum ice_status -ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c new file mode 100644 index 000000000000..027eba4e13f8 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_adminq_cmd.h" +#include "ice_sriov.h" + +/** + * ice_aq_send_msg_to_vf + * @hw: pointer to the hardware structure + * @vfid: VF ID to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cd: pointer to command details + * + * Send message to VF driver (0x0802) using mailbox + * queue and asynchronously sending message via + * ice_sq_send_cmd() function + */ +enum ice_status +ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, + u8 *msg, u16 msglen, struct ice_sq_cd *cd) +{ + struct ice_aqc_pf_vf_msg *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf); + + cmd = &desc.params.virt; + cmd->id = cpu_to_le32(vfid); + + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + + if (msglen) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd); +} + +/** + * ice_conv_link_speed_to_virtchnl + * @adv_link_support: determines the format of the returned link speed + * @link_speed: variable containing the link_speed to be converted + * + * Convert link speed supported by HW to link speed supported by virtchnl. + * If adv_link_support is true, then return link speed in Mbps. Else return + * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller + * needs to cast back to an enum virtchnl_link_speed in the case where + * adv_link_support is false, but when adv_link_support is true the caller can + * expect the speed in Mbps. 
+ */ +u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) +{ + u32 speed; + + if (adv_link_support) + switch (link_speed) { + case ICE_AQ_LINK_SPEED_10MB: + speed = ICE_LINK_SPEED_10MBPS; + break; + case ICE_AQ_LINK_SPEED_100MB: + speed = ICE_LINK_SPEED_100MBPS; + break; + case ICE_AQ_LINK_SPEED_1000MB: + speed = ICE_LINK_SPEED_1000MBPS; + break; + case ICE_AQ_LINK_SPEED_2500MB: + speed = ICE_LINK_SPEED_2500MBPS; + break; + case ICE_AQ_LINK_SPEED_5GB: + speed = ICE_LINK_SPEED_5000MBPS; + break; + case ICE_AQ_LINK_SPEED_10GB: + speed = ICE_LINK_SPEED_10000MBPS; + break; + case ICE_AQ_LINK_SPEED_20GB: + speed = ICE_LINK_SPEED_20000MBPS; + break; + case ICE_AQ_LINK_SPEED_25GB: + speed = ICE_LINK_SPEED_25000MBPS; + break; + case ICE_AQ_LINK_SPEED_40GB: + speed = ICE_LINK_SPEED_40000MBPS; + break; + default: + speed = ICE_LINK_SPEED_UNKNOWN; + break; + } + else + /* Virtchnl speeds are not defined for every speed supported in + * the hardware. To maintain compatibility with older AVF + * drivers, while reporting the speed the new speed values are + * resolved to the closest known virtchnl speeds + */ + switch (link_speed) { + case ICE_AQ_LINK_SPEED_10MB: + case ICE_AQ_LINK_SPEED_100MB: + speed = (u32)VIRTCHNL_LINK_SPEED_100MB; + break; + case ICE_AQ_LINK_SPEED_1000MB: + case ICE_AQ_LINK_SPEED_2500MB: + case ICE_AQ_LINK_SPEED_5GB: + speed = (u32)VIRTCHNL_LINK_SPEED_1GB; + break; + case ICE_AQ_LINK_SPEED_10GB: + speed = (u32)VIRTCHNL_LINK_SPEED_10GB; + break; + case ICE_AQ_LINK_SPEED_20GB: + speed = (u32)VIRTCHNL_LINK_SPEED_20GB; + break; + case ICE_AQ_LINK_SPEED_25GB: + speed = (u32)VIRTCHNL_LINK_SPEED_25GB; + break; + case ICE_AQ_LINK_SPEED_40GB: + /* fall through */ + speed = (u32)VIRTCHNL_LINK_SPEED_40GB; + break; + default: + speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN; + break; + } + + return speed; +} diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h new file mode 100644 index 000000000000..3d78a0795138 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. 
*/ + +#ifndef _ICE_SRIOV_H_ +#define _ICE_SRIOV_H_ + +#include "ice_common.h" + +#ifdef CONFIG_PCI_IOV +enum ice_status +ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, + u8 *msg, u16 msglen, struct ice_sq_cd *cd); + +u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); +#else /* CONFIG_PCI_IOV */ +static inline enum ice_status +ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, + u16 __always_unused vfid, u32 __always_unused v_opcode, + u32 __always_unused v_retval, u8 __always_unused *msg, + u16 __always_unused msglen, + struct ice_sq_cd __always_unused *cd) +{ + return 0; +} + +static inline u32 +ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support, + u16 __always_unused link_speed) +{ + return 0; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* _ICE_SRIOV_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h index 9a95c4ffd7d7..f49f299ddf2c 100644 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -6,6 +6,9 @@ /* Error Codes */ enum ice_status { + ICE_SUCCESS = 0, + + /* Generic codes : Range -1..-49 */ ICE_ERR_PARAM = -1, ICE_ERR_NOT_IMPL = -2, ICE_ERR_NOT_READY = -3, @@ -20,6 +23,7 @@ enum ice_status { ICE_ERR_ALREADY_EXISTS = -14, ICE_ERR_DOES_NOT_EXIST = -15, ICE_ERR_MAX_LIMIT = -17, + ICE_ERR_RESET_ONGOING = -18, ICE_ERR_BUF_TOO_SHORT = -52, ICE_ERR_NVM_BLANK_MODE = -53, ICE_ERR_AQ_ERROR = -100, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 6b7ec2ae5ad6..33403f39f1b3 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -86,6 +86,36 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, } /** + * ice_init_def_sw_recp - initialize the recipe book keeping tables + * @hw: pointer to the hw struct + * + * Allocate memory for the entire recipe table and initialize the structures/ + * entries corresponding to basic recipes. 
+ */ +enum ice_status +ice_init_def_sw_recp(struct ice_hw *hw) +{ + struct ice_sw_recipe *recps; + u8 i; + + recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, + sizeof(struct ice_sw_recipe), GFP_KERNEL); + if (!recps) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + recps[i].root_rid = i; + INIT_LIST_HEAD(&recps[i].filt_rules); + INIT_LIST_HEAD(&recps[i].filt_replay_rules); + mutex_init(&recps[i].filt_rule_lock); + } + + hw->switch_info->recp_list = recps; + + return 0; +} + +/** * ice_aq_get_sw_cfg - get switch configuration * @hw: pointer to the hardware structure * @buf: pointer to the result buffer @@ -140,23 +170,24 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf, * * Add a VSI context to the hardware (0x0210) */ -enum ice_status +static enum ice_status ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *res; struct ice_aqc_add_get_update_free_vsi *cmd; - enum ice_status status; struct ice_aq_desc desc; + enum ice_status status; cmd = &desc.params.vsi_cmd; - res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; + res = &desc.params.add_update_free_vsi_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi); if (!vsi_ctx->alloc_from_pool) cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + cmd->vf_id = vsi_ctx->vf_num; cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); @@ -175,6 +206,42 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, } /** + * ice_aq_free_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a VSI context struct + * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources + * @cd: pointer to command details structure or NULL + * + * Free VSI context info from hardware (0x0213) + */ +static enum ice_status +ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_update_free_vsi_resp *resp; + struct ice_aqc_add_get_update_free_vsi *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.vsi_cmd; + resp = &desc.params.add_update_free_vsi_res; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); + + cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + if (keep_vsi_alloc) + cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) { + vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + } + + return status; +} + +/** * ice_aq_update_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a VSI context struct @@ -182,7 +249,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware (0x0211) */ -enum ice_status +static enum ice_status ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { @@ -192,7 +259,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, enum ice_status status; cmd = &desc.params.vsi_cmd; - resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; + resp = &desc.params.add_update_free_vsi_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi); @@ -212,42 +279,162 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, } /** - * ice_aq_free_vsi + * ice_is_vsi_valid - check whether the VSI is valid or not + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + 
* + * check whether the VSI is valid or not + */ +bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle) +{ + return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle]; +} + +/** + * ice_get_hw_vsi_num - return the hw VSI number + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * return the hw VSI number + * Caution: call this function only if VSI is valid (ice_is_vsi_valid) + */ +u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle) +{ + return hw->vsi_ctx[vsi_handle]->vsi_num; +} + +/** + * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * return the VSI context entry for a given VSI handle + */ +struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle]; +} + +/** + * ice_save_vsi_ctx - save the VSI context for a given VSI handle + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * @vsi: VSI context pointer + * + * save the VSI context entry for a given VSI handle + */ +static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, + struct ice_vsi_ctx *vsi) +{ + hw->vsi_ctx[vsi_handle] = vsi; +} + +/** + * ice_clear_vsi_ctx - clear the VSI context entry + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * clear the VSI context entry + */ +static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_vsi_ctx *vsi; + + vsi = ice_get_vsi_ctx(hw, vsi_handle); + if (vsi) { + devm_kfree(ice_hw_to_dev(hw), vsi); + hw->vsi_ctx[vsi_handle] = NULL; + } +} + +/** + * ice_add_vsi - add VSI context to the hardware and VSI handle list * @hw: pointer to the hw struct + * @vsi_handle: unique VSI handle provided by drivers * @vsi_ctx: pointer to a VSI context struct - * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources * @cd: pointer to command details structure or NULL * - * Get VSI context info from hardware (0x0213) + * Add a VSI context to the hardware also add it into the VSI handle list. + * If this function gets called after reset for existing VSIs then update + * with the new HW VSI number in the corresponding VSI handle list entry. 
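The helpers just described replace raw hardware VSI numbers with a driver-side VSI handle that indexes hw->vsi_ctx[], with ice_is_vsi_valid() and ice_get_hw_vsi_num() doing the translation. A minimal sketch of how a caller would drive the handle-based lifecycle, not part of the commit; the function name and the NULL command-details arguments are illustrative, and ice_switch.h is assumed to declare these prototypes:

/* Illustrative sketch only -- not part of the commit. */
static enum ice_status example_vsi_lifecycle(struct ice_hw *hw, u16 vsi_handle,
					     struct ice_vsi_ctx *ctx)
{
	enum ice_status status;

	/* ice_add_vsi() sends the add-VSI admin command and records the
	 * handle -> ice_vsi_ctx mapping in hw->vsi_ctx[vsi_handle].
	 */
	status = ice_add_vsi(hw, vsi_handle, ctx, NULL);
	if (status)
		return status;

	/* Later callers only pass the handle; the switch code resolves the
	 * hardware VSI number itself.
	 */
	if (ice_is_vsi_valid(hw, vsi_handle))
		ice_debug(hw, ICE_DBG_SW, "handle %d -> hw VSI %d\n",
			  vsi_handle, ice_get_hw_vsi_num(hw, vsi_handle));

	/* keep_vsi_alloc == false also releases the hardware allocation */
	return ice_free_vsi(hw, vsi_handle, ctx, false, NULL);
}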
*/ enum ice_status -ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - bool keep_vsi_alloc, struct ice_sq_cd *cd) +ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) { - struct ice_aqc_add_update_free_vsi_resp *resp; - struct ice_aqc_add_get_update_free_vsi *cmd; - struct ice_aq_desc desc; + struct ice_vsi_ctx *tmp_vsi_ctx; enum ice_status status; - cmd = &desc.params.vsi_cmd; - resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); + if (vsi_handle >= ICE_MAX_VSI) + return ICE_ERR_PARAM; + status = ice_aq_add_vsi(hw, vsi_ctx, cd); + if (status) + return status; + tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!tmp_vsi_ctx) { + /* Create a new vsi context */ + tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*tmp_vsi_ctx), GFP_KERNEL); + if (!tmp_vsi_ctx) { + ice_aq_free_vsi(hw, vsi_ctx, false, cd); + return ICE_ERR_NO_MEMORY; + } + *tmp_vsi_ctx = *vsi_ctx; + ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); + } else { + /* update with new HW VSI num */ + if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num) + tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; + } - cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); - if (keep_vsi_alloc) - cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC); + return status; +} - status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); - if (!status) { - vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); - vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); - } +/** + * ice_free_vsi- free VSI context from hardware and VSI handle list + * @hw: pointer to the hw struct + * @vsi_handle: unique VSI handle + * @vsi_ctx: pointer to a VSI context struct + * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources + * @cd: pointer to command details structure or NULL + * + * Free VSI context info from hardware as well as from VSI handle list + */ +enum ice_status +ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd) +{ + enum ice_status status; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); + status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); + if (!status) + ice_clear_vsi_ctx(hw, vsi_handle); return status; } /** + * ice_update_vsi + * @hw: pointer to the hw struct + * @vsi_handle: unique VSI handle + * @vsi_ctx: pointer to a VSI context struct + * @cd: pointer to command details structure or NULL + * + * Update VSI context in the hardware + */ +enum ice_status +ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) +{ + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); + return ice_aq_update_vsi(hw, vsi_ctx, cd); +} + +/** * ice_aq_alloc_free_vsi_list * @hw: pointer to the hw struct * @vsi_list_id: VSI list id returned or used for lookup @@ -464,10 +651,12 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc) { u16 vlan_id = ICE_MAX_VLAN_ID + 1; - u8 eth_hdr[DUMMY_ETH_HDR_LEN]; void *daddr = NULL; + u16 eth_hdr_sz; + u8 *eth_hdr; u32 act = 0; __be16 *off; + u8 q_rgn; if (opc == ice_aqc_opc_remove_sw_rules) { s_rule->pdata.lkup_tx_rx.act = 0; @@ -477,13 +666,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, return; } + eth_hdr_sz = 
sizeof(dummy_eth_header); + eth_hdr = s_rule->pdata.lkup_tx_rx.hdr; + /* initialize the ether header with a dummy header */ - memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header)); + memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz); ice_fill_sw_info(hw, f_info); switch (f_info->fltr_act) { case ICE_FWD_TO_VSI: - act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & + act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; if (f_info->lkup_type != ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | @@ -503,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & ICE_SINGLE_ACT_Q_INDEX_M; break; + case ICE_DROP_PACKET: + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | + ICE_SINGLE_ACT_VALID_BIT; + break; case ICE_FWD_TO_QGRP: + q_rgn = f_info->qgrp_size > 0 ? + (u8)ilog2(f_info->qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) & + act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & ICE_SINGLE_ACT_Q_REGION_M; break; - case ICE_DROP_PACKET: - act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP; - break; default: return; } @@ -536,7 +733,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, daddr = f_info->l_data.ethertype_mac.mac_addr; /* fall-through */ case ICE_SW_LKUP_ETHERTYPE: - off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET]; + off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype); break; case ICE_SW_LKUP_MAC_VLAN: @@ -563,18 +760,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); if (daddr) - ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr); + ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr); if (!(vlan_id > ICE_MAX_VLAN_ID)) { - off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET]; + off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); *off = cpu_to_be16(vlan_id); } /* Create the switch rule with the final dummy Ethernet header */ if (opc != ice_aqc_opc_update_sw_rules) - s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr)); - - memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr)); + s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz); } /** @@ -601,8 +796,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, enum ice_status status; u16 lg_act_size; u16 rules_size; - u16 vsi_info; u32 act; + u16 id; if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; @@ -628,12 +823,11 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, /* First action VSI forwarding or VSI list forwarding depending on how * many VSIs */ - vsi_info = (m_ent->vsi_count > 1) ? - m_ent->fltr_info.fwd_id.vsi_list_id : - m_ent->fltr_info.fwd_id.vsi_id; + id = (m_ent->vsi_count > 1) ?
m_ent->fltr_info.fwd_id.vsi_list_id : + m_ent->fltr_info.fwd_id.hw_vsi_id; act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; - act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) & + act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; if (m_ent->vsi_count > 1) act |= ICE_LG_ACT_VSI_LIST; @@ -686,15 +880,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, /** * ice_create_vsi_list_map * @hw: pointer to the hardware structure - * @vsi_array: array of VSIs to form a VSI list - * @num_vsi: num VSI in the array + * @vsi_handle_arr: array of VSI handles to set in the VSI mapping + * @num_vsi: number of VSI handles in the array * @vsi_list_id: VSI list id generated as part of allocate resource * * Helper function to create a new entry of VSI list id to VSI mapping * using the given VSI list id */ static struct ice_vsi_list_map_info * -ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, +ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id) { struct ice_switch_info *sw = hw->switch_info; @@ -706,9 +900,9 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, return NULL; v_map->vsi_list_id = vsi_list_id; - + v_map->ref_cnt = 1; for (i = 0; i < num_vsi; i++) - set_bit(vsi_array[i], v_map->vsi_map); + set_bit(vsi_handle_arr[i], v_map->vsi_map); list_add(&v_map->list_entry, &sw->vsi_list_map_head); return v_map; @@ -717,8 +911,8 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, /** * ice_update_vsi_list_rule * @hw: pointer to the hardware structure - * @vsi_array: array of VSIs to form a VSI list - * @num_vsi: num VSI in the array + * @vsi_handle_arr: array of VSI handles to form a VSI list + * @num_vsi: number of VSI handles in the array * @vsi_list_id: VSI list id generated as part of allocate resource * @remove: Boolean value to indicate if this is a remove action * @opc: switch rules population command type - pass in the command opcode @@ -728,7 +922,7 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, * using the given VSI list id */ static enum ice_status -ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, +ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, enum ice_sw_lkup_type lkup_type) { @@ -759,9 +953,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) return ICE_ERR_NO_MEMORY; - - for (i = 0; i < num_vsi; i++) - s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]); + for (i = 0; i < num_vsi; i++) { + if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { + status = ICE_ERR_PARAM; + goto exit; + } + /* AQ call requires hw_vsi_id(s) */ + s_rule->pdata.vsi_list.vsi[i] = + cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); + } s_rule->type = cpu_to_le16(type); s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); @@ -769,6 +969,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); +exit: devm_kfree(ice_hw_to_dev(hw), s_rule); return status; } @@ -776,21 +977,16 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, /** * ice_create_vsi_list_rule - Creates and populates a VSI list rule * @hw: pointer to the hw struct - * @vsi_array: array of VSIs to form a VSI list - * @num_vsi: number of VSIs in 
the array + * @vsi_handle_arr: array of VSI handles to form a VSI list + * @num_vsi: number of VSI handles in the array * @vsi_list_id: stores the ID of the VSI list to be created * @lkup_type: switch rule filter's lookup type */ static enum ice_status -ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, +ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) { enum ice_status status; - int i; - - for (i = 0; i < num_vsi; i++) - if (vsi_array[i] >= ICE_MAX_VSI) - return ICE_ERR_OUT_OF_RANGE; status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, ice_aqc_opc_alloc_res); @@ -798,9 +994,9 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi, return status; /* Update the newly created VSI list to include the specified VSIs */ - return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id, - false, ice_aqc_opc_add_sw_rules, - lkup_type); + return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi, + *vsi_list_id, false, + ice_aqc_opc_add_sw_rules, lkup_type); } /** @@ -816,10 +1012,10 @@ static enum ice_status ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { - struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_aqc_sw_rules_elem *s_rule; enum ice_sw_lkup_type l_type; + struct ice_sw_recipe *recp; enum ice_status status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), @@ -860,31 +1056,9 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, * calls remove filter AQ command */ l_type = fm_entry->fltr_info.lkup_type; - if (l_type == ICE_SW_LKUP_MAC) { - mutex_lock(&sw->mac_list_lock); - list_add(&fm_entry->list_entry, &sw->mac_list_head); - mutex_unlock(&sw->mac_list_lock); - } else if (l_type == ICE_SW_LKUP_VLAN) { - mutex_lock(&sw->vlan_list_lock); - list_add(&fm_entry->list_entry, &sw->vlan_list_head); - mutex_unlock(&sw->vlan_list_lock); - } else if (l_type == ICE_SW_LKUP_ETHERTYPE || - l_type == ICE_SW_LKUP_ETHERTYPE_MAC) { - mutex_lock(&sw->eth_m_list_lock); - list_add(&fm_entry->list_entry, &sw->eth_m_list_head); - mutex_unlock(&sw->eth_m_list_lock); - } else if (l_type == ICE_SW_LKUP_PROMISC || - l_type == ICE_SW_LKUP_PROMISC_VLAN) { - mutex_lock(&sw->promisc_list_lock); - list_add(&fm_entry->list_entry, &sw->promisc_list_head); - mutex_unlock(&sw->promisc_list_lock); - } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) { - mutex_lock(&sw->mac_vlan_list_lock); - list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head); - mutex_unlock(&sw->mac_vlan_list_lock); - } else { - status = ICE_ERR_NOT_IMPL; - } + recp = &hw->switch_info->recp_list[l_type]; + list_add(&fm_entry->list_entry, &recp->filt_rules); + ice_create_pkt_fwd_rule_exit: devm_kfree(ice_hw_to_dev(hw), s_rule); return status; @@ -893,19 +1067,15 @@ ice_create_pkt_fwd_rule_exit: /** * ice_update_pkt_fwd_rule * @hw: pointer to the hardware structure - * @rule_id: rule of previously created switch rule to update - * @vsi_list_id: VSI list id to be updated with - * @f_info: ice_fltr_info to pull other information for switch rule + * @f_info: filter information for switch rule * * Call AQ command to update a previously created switch rule with a * VSI list id */ static enum ice_status -ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, - struct ice_fltr_info f_info) +ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_aqc_sw_rules_elem *s_rule; - struct ice_fltr_info 
tmp_fltr; enum ice_status status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), @@ -913,14 +1083,9 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, if (!s_rule) return ICE_ERR_NO_MEMORY; - tmp_fltr = f_info; - tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; - tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); - ice_fill_sw_rule(hw, &tmp_fltr, s_rule, - ice_aqc_opc_update_sw_rules); - - s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id); + s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id); /* Update switch rule with new rule set to forward VSI list */ status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, @@ -931,7 +1096,48 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, } /** - * ice_handle_vsi_list_mgmt + * ice_update_sw_rule_bridge_mode + * @hw: pointer to the hw struct + * + * Updates unicast switch filter rules based on VEB/VEPA mode + */ +enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *fm_entry; + enum ice_status status = 0; + struct list_head *rule_head; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + + rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; + rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; + + mutex_lock(rule_lock); + list_for_each_entry(fm_entry, rule_head, list_entry) { + struct ice_fltr_info *fi = &fm_entry->fltr_info; + u8 *addr = fi->l_data.mac.mac_addr; + + /* Update unicast Tx rules to reflect the selected + * VEB/VEPA mode + */ + if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) && + (fi->fltr_act == ICE_FWD_TO_VSI || + fi->fltr_act == ICE_FWD_TO_VSI_LIST || + fi->fltr_act == ICE_FWD_TO_Q || + fi->fltr_act == ICE_FWD_TO_QGRP)) { + status = ice_update_pkt_fwd_rule(hw, fi); + if (status) + break; + } + } + + mutex_unlock(rule_lock); + + return status; +} + +/** + * ice_add_update_vsi_list * @hw: pointer to the hardware structure * @m_entry: pointer to current filter management list entry * @cur_fltr: filter information from the book keeping entry @@ -952,10 +1158,10 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, * using the update switch rule command */ static enum ice_status -ice_handle_vsi_list_mgmt(struct ice_hw *hw, - struct ice_fltr_mgmt_list_entry *m_entry, - struct ice_fltr_info *cur_fltr, - struct ice_fltr_info *new_fltr) +ice_add_update_vsi_list(struct ice_hw *hw, + struct ice_fltr_mgmt_list_entry *m_entry, + struct ice_fltr_info *cur_fltr, + struct ice_fltr_info *new_fltr) { enum ice_status status = 0; u16 vsi_list_id = 0; @@ -975,34 +1181,36 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, * a part of a VSI list. So, create a VSI list with the old and * new VSIs. 
*/ - u16 vsi_id_arr[2]; - u16 fltr_rule; + struct ice_fltr_info tmp_fltr; + u16 vsi_handle_arr[2]; /* A rule already exists with the new VSI being added */ - if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id) + if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) return ICE_ERR_ALREADY_EXISTS; - vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id; - vsi_id_arr[1] = new_fltr->fwd_id.vsi_id; - status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2, + vsi_handle_arr[0] = cur_fltr->vsi_handle; + vsi_handle_arr[1] = new_fltr->vsi_handle; + status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, &vsi_list_id, new_fltr->lkup_type); if (status) return status; - fltr_rule = cur_fltr->fltr_rule_id; + tmp_fltr = *new_fltr; + tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; /* Update the previous switch rule of "MAC forward to VSI" to * "MAC fwd to VSI list" */ - status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id, - *new_fltr); + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); if (status) return status; cur_fltr->fwd_id.vsi_list_id = vsi_list_id; cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; m_entry->vsi_list_info = - ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2, + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, vsi_list_id); /* If this entry was large action then the large action needs @@ -1014,11 +1222,11 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, m_entry->sw_marker_id, m_entry->lg_act_idx); } else { - u16 vsi_id = new_fltr->fwd_id.vsi_id; + u16 vsi_handle = new_fltr->vsi_handle; enum ice_adminq_opc opcode; /* A rule already exists with the new VSI being added */ - if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map)) + if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) return 0; /* Update the previously created VSI list set with @@ -1027,12 +1235,12 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, vsi_list_id = cur_fltr->fwd_id.vsi_list_id; opcode = ice_aqc_opc_update_sw_rules; - status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, - false, opcode, + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, + vsi_list_id, false, opcode, new_fltr->lkup_type); /* update VSI list mapping info with new VSI id */ if (!status) - set_bit(vsi_id, m_entry->vsi_list_info->vsi_map); + set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); } if (!status) m_entry->vsi_count++; @@ -1040,54 +1248,313 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, } /** - * ice_find_mac_entry + * ice_find_rule_entry - Search a rule entry * @hw: pointer to the hardware structure - * @mac_addr: MAC address to search for + * @recp_id: lookup type for which the specified rule needs to be searched + * @f_info: rule information * - * Helper function to search for a MAC entry using a given MAC address - * Returns pointer to the entry if found. 
+ * Helper function to search for a given rule entry + * Returns pointer to entry storing the rule if found */ static struct ice_fltr_mgmt_list_entry * -ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr) +ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info) { - struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL; + struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL; struct ice_switch_info *sw = hw->switch_info; - - mutex_lock(&sw->mac_list_lock); - list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) { - u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; - - if (ether_addr_equal(buf, mac_addr)) { - mac_ret = m_list_itr; + struct list_head *list_head; + + list_head = &sw->recp_list[recp_id].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, + sizeof(f_info->l_data)) && + f_info->flag == list_itr->fltr_info.flag) { + ret = list_itr; break; } } - mutex_unlock(&sw->mac_list_lock); - return mac_ret; + return ret; +} + +/** + * ice_find_vsi_list_entry - Search VSI list map with VSI count 1 + * @hw: pointer to the hardware structure + * @recp_id: lookup type for which VSI lists needs to be searched + * @vsi_handle: VSI handle to be found in VSI list + * @vsi_list_id: VSI list id found containing vsi_handle + * + * Helper function to search a VSI list with single entry containing given VSI + * handle element. This can be extended further to search VSI list with more + * than 1 vsi_count. Returns pointer to VSI list entry if found. + */ +static struct ice_vsi_list_map_info * +ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, + u16 *vsi_list_id) +{ + struct ice_vsi_list_map_info *map_info = NULL; + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *list_itr; + struct list_head *list_head; + + list_head = &sw->recp_list[recp_id].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) { + map_info = list_itr->vsi_list_info; + if (test_bit(vsi_handle, map_info->vsi_map)) { + *vsi_list_id = map_info->vsi_list_id; + return map_info; + } + } + } + return NULL; } /** - * ice_add_shared_mac - Add one MAC shared filter rule + * ice_add_rule_internal - add rule for a given lookup type * @hw: pointer to the hardware structure + * @recp_id: lookup type (recipe id) for which rule has to be added * @f_entry: structure containing MAC forwarding information * - * Adds or updates the book keeping list for the MAC addresses + * Adds or updates the rule lists for a given recipe */ static enum ice_status -ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) +ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, + struct ice_fltr_list_entry *f_entry) { + struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_info *new_fltr, *cur_fltr; struct ice_fltr_mgmt_list_entry *m_entry; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; - new_fltr = &f_entry->fltr_info; + if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) + return ICE_ERR_PARAM; + f_entry->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); + + rule_lock = &sw->recp_list[recp_id].filt_rule_lock; - m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]); - if (!m_entry) + mutex_lock(rule_lock); + new_fltr = &f_entry->fltr_info; + if (new_fltr->flag & ICE_FLTR_RX) + new_fltr->src = 
hw->port_info->lport; + else if (new_fltr->flag & ICE_FLTR_TX) + new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id; + + m_entry = ice_find_rule_entry(hw, recp_id, new_fltr); + if (!m_entry) { + mutex_unlock(rule_lock); return ice_create_pkt_fwd_rule(hw, f_entry); + } cur_fltr = &m_entry->fltr_info; + status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr); + mutex_unlock(rule_lock); - return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr); + return status; +} + +/** + * ice_remove_vsi_list_rule + * @hw: pointer to the hardware structure + * @vsi_list_id: VSI list id generated as part of allocate resource + * @lkup_type: switch rule filter lookup type + * + * The VSI list should be emptied before this function is called to remove the + * VSI list. + */ +static enum ice_status +ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, + enum ice_sw_lkup_type lkup_type) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status status; + u16 s_rule_size; + + s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); + s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); + s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); + + /* Free the vsi_list resource that we allocated. It is assumed that the + * list is empty at this point. + */ + status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, + ice_aqc_opc_free_res); + + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_rem_update_vsi_list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle of the VSI to remove + * @fm_list: filter management entry for which the VSI list management needs to + * be done + */ +static enum ice_status +ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, + struct ice_fltr_mgmt_list_entry *fm_list) +{ + enum ice_sw_lkup_type lkup_type; + enum ice_status status = 0; + u16 vsi_list_id; + + if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || + fm_list->vsi_count == 0) + return ICE_ERR_PARAM; + + /* A rule with the VSI being removed does not exist */ + if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) + return ICE_ERR_DOES_NOT_EXIST; + + lkup_type = fm_list->fltr_info.lkup_type; + vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + fm_list->vsi_count--; + clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); + + if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) { + struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info; + struct ice_vsi_list_map_info *vsi_list_info = + fm_list->vsi_list_info; + u16 rem_vsi_handle; + + rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, + ICE_MAX_VSI); + if (!ice_is_vsi_valid(hw, rem_vsi_handle)) + return ICE_ERR_OUT_OF_RANGE; + + /* Make sure VSI list is empty before removing it below */ + status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, + vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + tmp_fltr_info.vsi_handle = rem_vsi_handle; + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", + 
tmp_fltr_info.fwd_id.hw_vsi_id, status); + return status; + } + + fm_list->fltr_info = tmp_fltr_info; + } + + if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) || + (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) { + struct ice_vsi_list_map_info *vsi_list_info = + fm_list->vsi_list_info; + + /* Remove the VSI list since it is no longer used */ + status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to remove VSI list %d, error %d\n", + vsi_list_id, status); + return status; + } + + list_del(&vsi_list_info->list_entry); + devm_kfree(ice_hw_to_dev(hw), vsi_list_info); + fm_list->vsi_list_info = NULL; + } + + return status; +} + +/** + * ice_remove_rule_internal - Remove a filter rule of a given type + * @hw: pointer to the hardware structure + * @recp_id: recipe id for which the rule needs to removed + * @f_entry: rule entry containing filter information + */ +static enum ice_status +ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, + struct ice_fltr_list_entry *f_entry) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *list_elem; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; + bool remove_rule = false; + u16 vsi_handle; + + if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) + return ICE_ERR_PARAM; + f_entry->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); + + rule_lock = &sw->recp_list[recp_id].filt_rule_lock; + mutex_lock(rule_lock); + list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info); + if (!list_elem) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } + + if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { + remove_rule = true; + } else if (!list_elem->vsi_list_info) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } else { + if (list_elem->vsi_list_info->ref_cnt > 1) + list_elem->vsi_list_info->ref_cnt--; + vsi_handle = f_entry->fltr_info.vsi_handle; + status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem); + if (status) + goto exit; + /* if vsi count goes to zero after updating the vsi list */ + if (list_elem->vsi_count == 0) + remove_rule = true; + } + + if (remove_rule) { + /* Remove the lookup rule */ + struct ice_aqc_sw_rules_elem *s_rule; + + s_rule = devm_kzalloc(ice_hw_to_dev(hw), + ICE_SW_RULE_RX_TX_NO_HDR_SIZE, + GFP_KERNEL); + if (!s_rule) { + status = ICE_ERR_NO_MEMORY; + goto exit; + } + + ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule, + ice_aqc_opc_remove_sw_rules); + + status = ice_aq_sw_rules(hw, s_rule, + ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, + ice_aqc_opc_remove_sw_rules, NULL); + if (status) + goto exit; + + /* Remove a book keeping from the list */ + devm_kfree(ice_hw_to_dev(hw), s_rule); + + list_del(&list_elem->list_entry); + devm_kfree(ice_hw_to_dev(hw), list_elem); + } +exit: + mutex_unlock(rule_lock); + return status; } /** @@ -1106,7 +1573,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_aqc_sw_rules_elem *s_rule, *r_iter; struct ice_fltr_list_entry *m_list_itr; + struct list_head *rule_head; u16 elem_sent, total_elem_left; + struct ice_switch_info *sw; + struct mutex *rule_lock; /* Lock to protect filter rule list */ enum ice_status status = 0; u16 num_unicast = 0; u16 s_rule_size; @@ -1114,48 +1584,73 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) if (!m_list || !hw) return ICE_ERR_PARAM; + s_rule = NULL; + sw = hw->switch_info; + rule_lock = 
&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; list_for_each_entry(m_list_itr, m_list, list_entry) { u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; + u16 vsi_handle; + u16 hw_vsi_id; - if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC) + m_list_itr->fltr_info.flag = ICE_FLTR_TX; + vsi_handle = m_list_itr->fltr_info.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; + /* update the src in case it is vsi num */ + if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) return ICE_ERR_PARAM; - if (is_zero_ether_addr(add)) + m_list_itr->fltr_info.src = hw_vsi_id; + if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || + is_zero_ether_addr(add)) return ICE_ERR_PARAM; if (is_unicast_ether_addr(add) && !hw->ucast_shared) { /* Don't overwrite the unicast address */ - if (ice_find_mac_entry(hw, add)) + mutex_lock(rule_lock); + if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, + &m_list_itr->fltr_info)) { + mutex_unlock(rule_lock); return ICE_ERR_ALREADY_EXISTS; + } + mutex_unlock(rule_lock); num_unicast++; } else if (is_multicast_ether_addr(add) || (is_unicast_ether_addr(add) && hw->ucast_shared)) { - status = ice_add_shared_mac(hw, m_list_itr); - if (status) { - m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; - return status; - } - m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + m_list_itr->status = + ice_add_rule_internal(hw, ICE_SW_LKUP_MAC, + m_list_itr); + if (m_list_itr->status) + return m_list_itr->status; } } + mutex_lock(rule_lock); /* Exit if no suitable entries were found for adding bulk switch rule */ - if (!num_unicast) - return 0; + if (!num_unicast) { + status = 0; + goto ice_add_mac_exit; + } + + rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; /* Allocate switch rule buffer for the bulk update for unicast */ s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; + if (!s_rule) { + status = ICE_ERR_NO_MEMORY; + goto ice_add_mac_exit; + } r_iter = s_rule; list_for_each_entry(m_list_itr, m_list, list_entry) { struct ice_fltr_info *f_info = &m_list_itr->fltr_info; - u8 *addr = &f_info->l_data.mac.mac_addr[0]; + u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; - if (is_unicast_ether_addr(addr)) { - ice_fill_sw_rule(hw, &m_list_itr->fltr_info, - r_iter, ice_aqc_opc_add_sw_rules); + if (is_unicast_ether_addr(mac_addr)) { + ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter, + ice_aqc_opc_add_sw_rules); r_iter = (struct ice_aqc_sw_rules_elem *) ((u8 *)r_iter + s_rule_size); } @@ -1183,11 +1678,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) r_iter = s_rule; list_for_each_entry(m_list_itr, m_list, list_entry) { struct ice_fltr_info *f_info = &m_list_itr->fltr_info; - u8 *addr = &f_info->l_data.mac.mac_addr[0]; - struct ice_switch_info *sw = hw->switch_info; + u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; struct ice_fltr_mgmt_list_entry *fm_entry; - if (is_unicast_ether_addr(addr)) { + if (is_unicast_ether_addr(mac_addr)) { f_info->fltr_rule_id = le16_to_cpu(r_iter->pdata.lkup_tx_rx.index); f_info->fltr_act = ICE_FWD_TO_VSI; @@ -1203,46 +1697,21 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) /* The book keeping entries will get removed when * base driver calls remove filter AQ command */ - mutex_lock(&sw->mac_list_lock); - list_add(&fm_entry->list_entry, &sw->mac_list_head); - 
mutex_unlock(&sw->mac_list_lock); + list_add(&fm_entry->list_entry, rule_head); r_iter = (struct ice_aqc_sw_rules_elem *) ((u8 *)r_iter + s_rule_size); } } ice_add_mac_exit: - devm_kfree(ice_hw_to_dev(hw), s_rule); + mutex_unlock(rule_lock); + if (s_rule) + devm_kfree(ice_hw_to_dev(hw), s_rule); return status; } /** - * ice_find_vlan_entry - * @hw: pointer to the hardware structure - * @vlan_id: VLAN id to search for - * - * Helper function to search for a VLAN entry using a given VLAN id - * Returns pointer to the entry if found. - */ -static struct ice_fltr_mgmt_list_entry * -ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id) -{ - struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL; - struct ice_switch_info *sw = hw->switch_info; - - mutex_lock(&sw->vlan_list_lock); - list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry) - if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) { - vlan_ret = vlan_list_itr; - break; - } - - mutex_unlock(&sw->vlan_list_lock); - return vlan_ret; -} - -/** * ice_add_vlan_internal - Add one VLAN based filter rule * @hw: pointer to the hardware structure * @f_entry: filter entry containing one VLAN information @@ -1250,53 +1719,150 @@ ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id) static enum ice_status ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { - struct ice_fltr_info *new_fltr, *cur_fltr; + struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *v_list_itr; - u16 vlan_id; + struct ice_fltr_info *new_fltr, *cur_fltr; + enum ice_sw_lkup_type lkup_type; + u16 vsi_list_id = 0, vsi_handle; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; + + if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) + return ICE_ERR_PARAM; + f_entry->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); new_fltr = &f_entry->fltr_info; + /* VLAN id should only be 12 bits */ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) return ICE_ERR_PARAM; - vlan_id = new_fltr->l_data.vlan.vlan_id; - v_list_itr = ice_find_vlan_entry(hw, vlan_id); + if (new_fltr->src_id != ICE_SRC_ID_VSI) + return ICE_ERR_PARAM; + + new_fltr->src = new_fltr->fwd_id.hw_vsi_id; + lkup_type = new_fltr->lkup_type; + vsi_handle = new_fltr->vsi_handle; + rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; + mutex_lock(rule_lock); + v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr); if (!v_list_itr) { - u16 vsi_id = ICE_VSI_INVAL_ID; - enum ice_status status; - u16 vsi_list_id = 0; + struct ice_vsi_list_map_info *map_info = NULL; if (new_fltr->fltr_act == ICE_FWD_TO_VSI) { - enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type; - - /* All VLAN pruning rules use a VSI list. - * Convert the action to forwarding to a VSI list. + /* All VLAN pruning rules use a VSI list. Check if + * there is already a VSI list containing VSI that we + * want to add. If found, use the same vsi_list_id for + * this new VLAN rule or else create a new list. */ - vsi_id = new_fltr->fwd_id.vsi_id; - status = ice_create_vsi_list_rule(hw, &vsi_id, 1, - &vsi_list_id, - lkup_type); - if (status) - return status; + map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN, + vsi_handle, + &vsi_list_id); + if (!map_info) { + status = ice_create_vsi_list_rule(hw, + &vsi_handle, + 1, + &vsi_list_id, + lkup_type); + if (status) + goto exit; + } + /* Convert the action to forwarding to a VSI list. 
*/ new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; new_fltr->fwd_id.vsi_list_id = vsi_list_id; } status = ice_create_pkt_fwd_rule(hw, f_entry); - if (!status && vsi_id != ICE_VSI_INVAL_ID) { - v_list_itr = ice_find_vlan_entry(hw, vlan_id); - if (!v_list_itr) - return ICE_ERR_DOES_NOT_EXIST; - v_list_itr->vsi_list_info = - ice_create_vsi_list_map(hw, &vsi_id, 1, - vsi_list_id); + if (!status) { + v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, + new_fltr); + if (!v_list_itr) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } + /* reuse VSI list for new rule and increment ref_cnt */ + if (map_info) { + v_list_itr->vsi_list_info = map_info; + map_info->ref_cnt++; + } else { + v_list_itr->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle, + 1, vsi_list_id); + } } + } else if (v_list_itr->vsi_list_info->ref_cnt == 1) { + /* Update existing VSI list to add new VSI id only if it used + * by one VLAN rule. + */ + cur_fltr = &v_list_itr->fltr_info; + status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, + new_fltr); + } else { + /* If VLAN rule exists and VSI list being used by this rule is + * referenced by more than 1 VLAN rule. Then create a new VSI + * list appending previous VSI with new VSI and update existing + * VLAN rule to point to new VSI list id + */ + struct ice_fltr_info tmp_fltr; + u16 vsi_handle_arr[2]; + u16 cur_handle; - return status; + /* Current implementation only supports reusing VSI list with + * one VSI count. We should never hit below condition + */ + if (v_list_itr->vsi_count > 1 && + v_list_itr->vsi_list_info->ref_cnt > 1) { + ice_debug(hw, ICE_DBG_SW, + "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); + status = ICE_ERR_CFG; + goto exit; + } + + cur_handle = + find_first_bit(v_list_itr->vsi_list_info->vsi_map, + ICE_MAX_VSI); + + /* A rule already exists with the new VSI being added */ + if (cur_handle == vsi_handle) { + status = ICE_ERR_ALREADY_EXISTS; + goto exit; + } + + vsi_handle_arr[0] = cur_handle; + vsi_handle_arr[1] = vsi_handle; + status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, + &vsi_list_id, lkup_type); + if (status) + goto exit; + + tmp_fltr = v_list_itr->fltr_info; + tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + /* Update the previous switch rule to a new VSI list which + * includes current VSI thats requested + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) + goto exit; + + /* before overriding VSI list map info. 
decrement ref_cnt of + * previous VSI list + */ + v_list_itr->vsi_list_info->ref_cnt--; + + /* now update to newly created list */ + v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id; + v_list_itr->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, + vsi_list_id); + v_list_itr->vsi_count++; } - cur_fltr = &v_list_itr->fltr_info; - return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr); +exit: + mutex_unlock(rule_lock); + return status; } /** @@ -1313,335 +1879,58 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) return ICE_ERR_PARAM; list_for_each_entry(v_list_itr, v_list, list_entry) { - enum ice_status status; - if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) return ICE_ERR_PARAM; - - status = ice_add_vlan_internal(hw, v_list_itr); - if (status) { - v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; - return status; - } - v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + v_list_itr->fltr_info.flag = ICE_FLTR_TX; + v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; } return 0; } /** - * ice_remove_vsi_list_rule + * ice_rem_sw_rule_info * @hw: pointer to the hardware structure - * @vsi_list_id: VSI list id generated as part of allocate resource - * @lkup_type: switch rule filter lookup type + * @rule_head: pointer to the switch list structure that we want to delete */ -static enum ice_status -ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, - enum ice_sw_lkup_type lkup_type) -{ - struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; - u16 s_rule_size; - - s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); - s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); - s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); - /* FW expects number of VSIs in vsi_list resource to be 0 for clear - * command. Since memory is zero'ed out during initialization, it's not - * necessary to explicitly initialize the variable to 0. 
- */ - - status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, - ice_aqc_opc_remove_sw_rules, NULL); - if (!status) - /* Free the vsi_list resource that we allocated */ - status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, - ice_aqc_opc_free_res); - - devm_kfree(ice_hw_to_dev(hw), s_rule); - return status; -} - -/** - * ice_handle_rem_vsi_list_mgmt - * @hw: pointer to the hardware structure - * @vsi_id: ID of the VSI to remove - * @fm_list_itr: filter management entry for which the VSI list management - * needs to be done - */ -static enum ice_status -ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id, - struct ice_fltr_mgmt_list_entry *fm_list_itr) +static void +ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head) { - struct ice_switch_info *sw = hw->switch_info; - enum ice_status status = 0; - enum ice_sw_lkup_type lkup_type; - bool is_last_elem = true; - bool conv_list = false; - bool del_list = false; - u16 vsi_list_id; - - lkup_type = fm_list_itr->fltr_info.lkup_type; - vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id; - - if (fm_list_itr->vsi_count > 1) { - status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, - true, - ice_aqc_opc_update_sw_rules, - lkup_type); - if (status) - return status; - fm_list_itr->vsi_count--; - is_last_elem = false; - clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map); - } - - /* For non-VLAN rules that forward packets to a VSI list, convert them - * to forwarding packets to a VSI if there is only one VSI left in the - * list. Unused lists are then removed. - * VLAN rules need to use VSI lists even with only one VSI. - */ - if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) { - if (lkup_type == ICE_SW_LKUP_VLAN) { - del_list = is_last_elem; - } else if (fm_list_itr->vsi_count == 1) { - conv_list = true; - del_list = true; - } - } - - if (del_list) { - /* Remove the VSI list since it is no longer used */ - struct ice_vsi_list_map_info *vsi_list_info = - fm_list_itr->vsi_list_info; + if (!list_empty(rule_head)) { + struct ice_fltr_mgmt_list_entry *entry; + struct ice_fltr_mgmt_list_entry *tmp; - status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); - if (status) - return status; - - if (conv_list) { - u16 rem_vsi_id; - - rem_vsi_id = find_first_bit(vsi_list_info->vsi_map, - ICE_MAX_VSI); - - /* Error out when the expected last element is not in - * the VSI list map - */ - if (rem_vsi_id == ICE_MAX_VSI) - return ICE_ERR_OUT_OF_RANGE; - - /* Change the list entry action from VSI_LIST to VSI */ - fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; - fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id; + list_for_each_entry_safe(entry, tmp, rule_head, list_entry) { + list_del(&entry->list_entry); + devm_kfree(ice_hw_to_dev(hw), entry); } - - list_del(&vsi_list_info->list_entry); - devm_kfree(ice_hw_to_dev(hw), vsi_list_info); - fm_list_itr->vsi_list_info = NULL; - } - - if (conv_list) { - /* Convert the rule's forward action to forwarding packets to - * a VSI - */ - struct ice_aqc_sw_rules_elem *s_rule; - - s_rule = devm_kzalloc(ice_hw_to_dev(hw), - ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, - GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule, - ice_aqc_opc_update_sw_rules); - - s_rule->pdata.lkup_tx_rx.index = - cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id); - - status = ice_aq_sw_rules(hw, s_rule, - ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, - ice_aqc_opc_update_sw_rules, NULL); - devm_kfree(ice_hw_to_dev(hw), s_rule); - if (status) - return 
status; } - - if (is_last_elem) { - /* Remove the lookup rule */ - struct ice_aqc_sw_rules_elem *s_rule; - - s_rule = devm_kzalloc(ice_hw_to_dev(hw), - ICE_SW_RULE_RX_TX_NO_HDR_SIZE, - GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule, - ice_aqc_opc_remove_sw_rules); - - status = ice_aq_sw_rules(hw, s_rule, - ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, - ice_aqc_opc_remove_sw_rules, NULL); - if (status) - return status; - - /* Remove a book keeping entry from the MAC address list */ - mutex_lock(&sw->mac_list_lock); - list_del(&fm_list_itr->list_entry); - mutex_unlock(&sw->mac_list_lock); - devm_kfree(ice_hw_to_dev(hw), fm_list_itr); - devm_kfree(ice_hw_to_dev(hw), s_rule); - } - return status; -} - -/** - * ice_remove_mac_entry - * @hw: pointer to the hardware structure - * @f_entry: structure containing MAC forwarding information - */ -static enum ice_status -ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) -{ - struct ice_fltr_mgmt_list_entry *m_entry; - u16 vsi_id; - u8 *add; - - add = &f_entry->fltr_info.l_data.mac.mac_addr[0]; - - m_entry = ice_find_mac_entry(hw, add); - if (!m_entry) - return ICE_ERR_PARAM; - - vsi_id = f_entry->fltr_info.fwd_id.vsi_id; - return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry); } /** - * ice_remove_mac - remove a MAC address based filter rule + * ice_cfg_dflt_vsi - change state of VSI to set/clear default * @hw: pointer to the hardware structure - * @m_list: list of MAC addresses and forwarding information - * - * This function removes either a MAC filter rule or a specific VSI from a - * VSI list for a multicast MAC address. - * - * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by - * ice_add_mac. Caller should be aware that this call will only work if all - * the entries passed into m_list were added previously. It will not attempt to - * do a partial remove of entries that were found. - */ -enum ice_status -ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) -{ - struct ice_aqc_sw_rules_elem *s_rule, *r_iter; - u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; - struct ice_switch_info *sw = hw->switch_info; - struct ice_fltr_mgmt_list_entry *m_entry; - struct ice_fltr_list_entry *m_list_itr; - u16 elem_sent, total_elem_left; - enum ice_status status = 0; - u16 num_unicast = 0; - - if (!m_list) - return ICE_ERR_PARAM; - - list_for_each_entry(m_list_itr, m_list, list_entry) { - u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; - - if (is_unicast_ether_addr(addr) && !hw->ucast_shared) - num_unicast++; - else if (is_multicast_ether_addr(addr) || - (is_unicast_ether_addr(addr) && hw->ucast_shared)) - ice_remove_mac_entry(hw, m_list_itr); - } - - /* Exit if no unicast addresses found. 
Multicast switch rules - * were added individually - */ - if (!num_unicast) - return 0; - - /* Allocate switch rule buffer for the bulk update for unicast */ - s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, - GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - r_iter = s_rule; - list_for_each_entry(m_list_itr, m_list, list_entry) { - u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; - - if (is_unicast_ether_addr(addr)) { - m_entry = ice_find_mac_entry(hw, addr); - if (!m_entry) { - status = ICE_ERR_DOES_NOT_EXIST; - goto ice_remove_mac_exit; - } - - ice_fill_sw_rule(hw, &m_entry->fltr_info, - r_iter, ice_aqc_opc_remove_sw_rules); - r_iter = (struct ice_aqc_sw_rules_elem *) - ((u8 *)r_iter + s_rule_size); - } - } - - /* Call AQ bulk switch rule update for all unicast addresses */ - r_iter = s_rule; - /* Call AQ switch rule in AQ_MAX chunk */ - for (total_elem_left = num_unicast; total_elem_left > 0; - total_elem_left -= elem_sent) { - struct ice_aqc_sw_rules_elem *entry = r_iter; - - elem_sent = min(total_elem_left, - (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size)); - status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size, - elem_sent, ice_aqc_opc_remove_sw_rules, - NULL); - if (status) - break; - r_iter = (struct ice_aqc_sw_rules_elem *) - ((u8 *)r_iter + s_rule_size); - } - - list_for_each_entry(m_list_itr, m_list, list_entry) { - u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; - - if (is_unicast_ether_addr(addr)) { - m_entry = ice_find_mac_entry(hw, addr); - if (!m_entry) - return ICE_ERR_OUT_OF_RANGE; - mutex_lock(&sw->mac_list_lock); - list_del(&m_entry->list_entry); - mutex_unlock(&sw->mac_list_lock); - devm_kfree(ice_hw_to_dev(hw), m_entry); - } - } - -ice_remove_mac_exit: - devm_kfree(ice_hw_to_dev(hw), s_rule); - return status; -} - -/** - * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default - * VSI for the switch (represented by swid) - * @hw: pointer to the hardware structure - * @vsi_id: number of VSI to set as default + * @vsi_handle: VSI handle to set as default * @set: true to add the above mentioned switch rule, false to remove it * @direction: ICE_FLTR_RX or ICE_FLTR_TX + * + * add filter rule to set/unset given VSI as default VSI for the switch + * (represented by swid) */ enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction) +ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) { struct ice_aqc_sw_rules_elem *s_rule; struct ice_fltr_info f_info; enum ice_adminq_opc opcode; enum ice_status status; u16 s_rule_size; + u16 hw_vsi_id; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); s_rule_size = set ? 
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : ICE_SW_RULE_RX_TX_NO_HDR_SIZE; @@ -1654,15 +1943,17 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction) f_info.lkup_type = ICE_SW_LKUP_DFLT; f_info.flag = direction; f_info.fltr_act = ICE_FWD_TO_VSI; - f_info.fwd_id.vsi_id = vsi_id; + f_info.fwd_id.hw_vsi_id = hw_vsi_id; if (f_info.flag & ICE_FLTR_RX) { f_info.src = hw->port_info->lport; + f_info.src_id = ICE_SRC_ID_LPORT; if (!set) f_info.fltr_rule_id = hw->port_info->dflt_rx_vsi_rule_id; } else if (f_info.flag & ICE_FLTR_TX) { - f_info.src = vsi_id; + f_info.src_id = ICE_SRC_ID_VSI; + f_info.src = hw_vsi_id; if (!set) f_info.fltr_rule_id = hw->port_info->dflt_tx_vsi_rule_id; @@ -1682,10 +1973,10 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction) u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); if (f_info.flag & ICE_FLTR_TX) { - hw->port_info->dflt_tx_vsi_num = vsi_id; + hw->port_info->dflt_tx_vsi_num = hw_vsi_id; hw->port_info->dflt_tx_vsi_rule_id = index; } else if (f_info.flag & ICE_FLTR_RX) { - hw->port_info->dflt_rx_vsi_num = vsi_id; + hw->port_info->dflt_rx_vsi_num = hw_vsi_id; hw->port_info->dflt_rx_vsi_rule_id = index; } } else { @@ -1704,26 +1995,38 @@ out: } /** - * ice_remove_vlan_internal - Remove one VLAN based filter rule + * ice_remove_mac - remove a MAC address based filter rule * @hw: pointer to the hardware structure - * @f_entry: filter entry containing one VLAN information + * @m_list: list of MAC addresses and forwarding information + * + * This function removes either a MAC filter rule or a specific VSI from a + * VSI list for a multicast MAC address. + * + * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by + * ice_add_mac. Caller should be aware that this call will only work if all + * the entries passed into m_list were added previously. It will not attempt to + * do a partial remove of entries that were found. 
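ice_add_mac() and ice_remove_mac() now operate on a caller-built list of struct ice_fltr_list_entry items rather than on individual addresses, with the per-entry result written back into each element's status field. A minimal sketch of preparing such a list for a single unicast address, not part of the commit; the helper name and the devm_* allocation pattern are illustrative only, and ice_switch.h is assumed to provide the driver types:

/* Illustrative sketch only -- not part of the commit. */
static enum ice_status example_add_one_mac(struct ice_hw *hw, u16 vsi_handle,
					   const u8 *mac)
{
	struct ice_fltr_list_entry *entry;
	enum ice_status status;
	LIST_HEAD(m_list);

	entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ICE_ERR_NO_MEMORY;

	/* Forward matching frames to the VSI identified by vsi_handle;
	 * ice_add_mac() itself fills in flag/src and resolves the handle
	 * to the hardware VSI number.
	 */
	entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
	entry->fltr_info.vsi_handle = vsi_handle;
	ether_addr_copy(entry->fltr_info.l_data.mac.mac_addr, mac);
	list_add(&entry->list_entry, &m_list);

	status = ice_add_mac(hw, &m_list);

	/* The switch code keeps its own bookkeeping copy, so the temporary
	 * list entry can be released once the call returns.
	 */
	devm_kfree(ice_hw_to_dev(hw), entry);
	return status;
}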
*/ -static enum ice_status -ice_remove_vlan_internal(struct ice_hw *hw, - struct ice_fltr_list_entry *f_entry) +enum ice_status +ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { - struct ice_fltr_info *new_fltr; - struct ice_fltr_mgmt_list_entry *v_list_elem; - u16 vsi_id; + struct ice_fltr_list_entry *list_itr, *tmp; - new_fltr = &f_entry->fltr_info; - - v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id); - if (!v_list_elem) + if (!m_list) return ICE_ERR_PARAM; - vsi_id = f_entry->fltr_info.fwd_id.vsi_id; - return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem); + list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { + enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; + + if (l_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + list_itr->status = ice_remove_rule_internal(hw, + ICE_SW_LKUP_MAC, + list_itr); + if (list_itr->status) + return list_itr->status; + } + return 0; } /** @@ -1734,131 +2037,169 @@ ice_remove_vlan_internal(struct ice_hw *hw, enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) { - struct ice_fltr_list_entry *v_list_itr; - enum ice_status status = 0; + struct ice_fltr_list_entry *v_list_itr, *tmp; if (!v_list || !hw) return ICE_ERR_PARAM; - list_for_each_entry(v_list_itr, v_list, list_entry) { - status = ice_remove_vlan_internal(hw, v_list_itr); - if (status) { - v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; - return status; - } - v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { + enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; + + if (l_type != ICE_SW_LKUP_VLAN) + return ICE_ERR_PARAM; + v_list_itr->status = ice_remove_rule_internal(hw, + ICE_SW_LKUP_VLAN, + v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; } - return status; + return 0; +} + +/** + * ice_vsi_uses_fltr - Determine if given VSI uses specified filter + * @fm_entry: filter entry to inspect + * @vsi_handle: VSI handle to compare with filter info + */ +static bool +ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) +{ + return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && + fm_entry->fltr_info.vsi_handle == vsi_handle) || + (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && + (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); +} + +/** + * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to remove filters from + * @vsi_list_head: pointer to the list to add entry to + * @fi: pointer to fltr_info of filter entry to copy & add + * + * Helper function, used when creating a list of filters to remove from + * a specific VSI. The entry added to vsi_list_head is a COPY of the + * original filter entry, with the exception of fltr_info.fltr_act and + * fltr_info.fwd_id fields. These are set such that later logic can + * extract which VSI to remove the fltr from, and pass on that information. 
+ */ +static enum ice_status +ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, + struct list_head *vsi_list_head, + struct ice_fltr_info *fi) +{ + struct ice_fltr_list_entry *tmp; + + /* this memory is freed up in the caller function + * once filters for this VSI are removed + */ + tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + tmp->fltr_info = *fi; + + /* Overwrite these fields to indicate which VSI to remove filter from, + * so find and remove logic can extract the information from the + * list entries. Note that original entries will still have proper + * values. + */ + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.vsi_handle = vsi_handle; + tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + list_add(&tmp->list_entry, vsi_list_head); + + return 0; } /** * ice_add_to_vsi_fltr_list - Add VSI filters to the list * @hw: pointer to the hardware structure - * @vsi_id: ID of VSI to remove filters from + * @vsi_handle: VSI handle to remove filters from * @lkup_list_head: pointer to the list that has certain lookup type filters - * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id + * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle + * + * Locates all filters in lkup_list_head that are used by the given VSI, + * and adds COPIES of those entries to vsi_list_head (intended to be used + * to remove the listed filters). + * Note that this means all entries in vsi_list_head must be explicitly + * deallocated by the caller when done with list. */ static enum ice_status -ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id, +ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *lkup_list_head, struct list_head *vsi_list_head) { struct ice_fltr_mgmt_list_entry *fm_entry; + enum ice_status status = 0; /* check to make sure VSI id is valid and within boundary */ - if (vsi_id >= - (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1)) + if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; list_for_each_entry(fm_entry, lkup_list_head, list_entry) { struct ice_fltr_info *fi; fi = &fm_entry->fltr_info; - if ((fi->fltr_act == ICE_FWD_TO_VSI && - fi->fwd_id.vsi_id == vsi_id) || - (fi->fltr_act == ICE_FWD_TO_VSI_LIST && - (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) { - struct ice_fltr_list_entry *tmp; - - /* this memory is freed up in the caller function - * ice_remove_vsi_lkup_fltr() once filters for - * this VSI are removed - */ - tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), - GFP_KERNEL); - if (!tmp) - return ICE_ERR_NO_MEMORY; + if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) + continue; - memcpy(&tmp->fltr_info, fi, sizeof(*fi)); - - /* Expected below fields to be set to ICE_FWD_TO_VSI and - * the particular VSI id since we are only removing this - * one VSI - */ - if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) { - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.fwd_id.vsi_id = vsi_id; - } - - list_add(&tmp->list_entry, vsi_list_head); - } + status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, + vsi_list_head, fi); + if (status) + return status; } - return 0; + return status; } /** * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI * @hw: pointer to the hardware structure - * @vsi_id: ID of VSI to remove filters from + * @vsi_handle: VSI handle to remove filters from * @lkup: switch rule filter lookup type */ static void -ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 
vsi_id, +ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, enum ice_sw_lkup_type lkup) { struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_list_entry *fm_entry; struct list_head remove_list_head; + struct list_head *rule_head; struct ice_fltr_list_entry *tmp; + struct mutex *rule_lock; /* Lock to protect filter rule list */ enum ice_status status; INIT_LIST_HEAD(&remove_list_head); + rule_lock = &sw->recp_list[lkup].filt_rule_lock; + rule_head = &sw->recp_list[lkup].filt_rules; + mutex_lock(rule_lock); + status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head, + &remove_list_head); + mutex_unlock(rule_lock); + if (status) + return; + switch (lkup) { case ICE_SW_LKUP_MAC: - mutex_lock(&sw->mac_list_lock); - status = ice_add_to_vsi_fltr_list(hw, vsi_id, - &sw->mac_list_head, - &remove_list_head); - mutex_unlock(&sw->mac_list_lock); - if (!status) { - ice_remove_mac(hw, &remove_list_head); - goto free_fltr_list; - } + ice_remove_mac(hw, &remove_list_head); break; case ICE_SW_LKUP_VLAN: - mutex_lock(&sw->vlan_list_lock); - status = ice_add_to_vsi_fltr_list(hw, vsi_id, - &sw->vlan_list_head, - &remove_list_head); - mutex_unlock(&sw->vlan_list_lock); - if (!status) { - ice_remove_vlan(hw, &remove_list_head); - goto free_fltr_list; - } + ice_remove_vlan(hw, &remove_list_head); break; case ICE_SW_LKUP_MAC_VLAN: case ICE_SW_LKUP_ETHERTYPE: case ICE_SW_LKUP_ETHERTYPE_MAC: case ICE_SW_LKUP_PROMISC: - case ICE_SW_LKUP_PROMISC_VLAN: case ICE_SW_LKUP_DFLT: - ice_debug(hw, ICE_DBG_SW, - "Remove filters for this lookup type hasn't been implemented yet\n"); + case ICE_SW_LKUP_PROMISC_VLAN: + case ICE_SW_LKUP_LAST: + default: + ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup); break; } - return; -free_fltr_list: list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { list_del(&fm_entry->list_entry); devm_kfree(ice_hw_to_dev(hw), fm_entry); @@ -1868,16 +2209,121 @@ free_fltr_list: /** * ice_remove_vsi_fltr - Remove all filters for a VSI * @hw: pointer to the hardware structure - * @vsi_id: ID of VSI to remove filters from + * @vsi_handle: VSI handle to remove filters from + */ +void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) +{ + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN); +} + +/** + * ice_replay_vsi_fltr - Replay filters for requested VSI + * @hw: pointer to the hardware structure + * @vsi_handle: driver VSI handle + * @recp_id: Recipe id for which rules need to be replayed + * @list_head: list for which filters need to be replayed + * + * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. + * It is required to pass valid VSI handle. 
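For orientation, a rebuild-time caller might drive the replay path roughly as below; this loop is an editorial sketch, not patch content, and only ICE_MAX_VSI, ice_is_vsi_valid() and ice_replay_vsi_all_fltr() are taken from this series.

/* Hedged sketch: replay the bookkeeping filter lists for every valid VSI
 * handle after a reset. Error handling is simplified.
 */
static enum ice_status ice_sketch_replay_all_vsi(struct ice_hw *hw)
{
	enum ice_status status = 0;
	u16 vsi_handle;

	for (vsi_handle = 0; vsi_handle < ICE_MAX_VSI; vsi_handle++) {
		if (!ice_is_vsi_valid(hw, vsi_handle))
			continue;
		status = ice_replay_vsi_all_fltr(hw, vsi_handle);
		if (status)
			break;
	}
	return status;
}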
+ */ +static enum ice_status +ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, + struct list_head *list_head) +{ + struct ice_fltr_mgmt_list_entry *itr; + enum ice_status status = 0; + u16 hw_vsi_id; + + if (list_empty(list_head)) + return status; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + list_for_each_entry(itr, list_head, list_entry) { + struct ice_fltr_list_entry f_entry; + + f_entry.fltr_info = itr->fltr_info; + if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && + itr->fltr_info.vsi_handle == vsi_handle) { + /* update the src in case it is vsi num */ + if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) + f_entry.fltr_info.src = hw_vsi_id; + status = ice_add_rule_internal(hw, recp_id, &f_entry); + if (status) + goto end; + continue; + } + if (!itr->vsi_list_info || + !test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) + continue; + /* Clearing it so that the logic can add it back */ + clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); + f_entry.fltr_info.vsi_handle = vsi_handle; + f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; + /* update the src in case it is vsi num */ + if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) + f_entry.fltr_info.src = hw_vsi_id; + if (recp_id == ICE_SW_LKUP_VLAN) + status = ice_add_vlan_internal(hw, &f_entry); + else + status = ice_add_rule_internal(hw, recp_id, &f_entry); + if (status) + goto end; + } +end: + return status; +} + +/** + * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists + * @hw: pointer to the hardware structure + * @vsi_handle: driver VSI handle + * + * Replays filters for requested VSI via vsi_handle. */ -void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id) +enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) { - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC); - ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN); + struct ice_switch_info *sw = hw->switch_info; + enum ice_status status = 0; + u8 i; + + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + struct list_head *head; + + head = &sw->recp_list[i].filt_replay_rules; + status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); + if (status) + return status; + } + return status; +} + +/** + * ice_rm_all_sw_replay_rule_info - deletes filter replay rules + * @hw: pointer to the hw struct + * + * Deletes the filter replay rules. 
+ */ +void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + u8 i; + + if (!sw) + return; + + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { + struct list_head *l_head; + + l_head = &sw->recp_list[i].filt_replay_rules; + ice_rem_sw_rule_info(hw, l_head); + } + } } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 9b8ec128ee31..b88d96a1ef69 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -17,7 +17,9 @@ struct ice_vsi_ctx { u16 vsis_unallocated; u16 flags; struct ice_aqc_vsi_props info; + struct ice_sched_vsi_info sched; u8 alloc_from_pool; + u8 vf_num; }; enum ice_sw_fwd_act_type { @@ -39,6 +41,15 @@ enum ice_sw_lkup_type { ICE_SW_LKUP_DFLT = 5, ICE_SW_LKUP_ETHERTYPE_MAC = 8, ICE_SW_LKUP_PROMISC_VLAN = 9, + ICE_SW_LKUP_LAST +}; + +/* type of filter src id */ +enum ice_src_id { + ICE_SRC_ID_UNKNOWN = 0, + ICE_SRC_ID_VSI, + ICE_SRC_ID_QUEUE, + ICE_SRC_ID_LPORT, }; struct ice_fltr_info { @@ -55,6 +66,7 @@ struct ice_fltr_info { /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ u16 src; + enum ice_src_id src_id; union { struct { @@ -76,7 +88,10 @@ struct ice_fltr_info { u16 ethertype; u8 mac_addr[ETH_ALEN]; /* optional */ } ethertype_mac; - } l_data; + } l_data; /* Make sure to zero out the memory of l_data before using + * it or only set the data associated with lookup match + * rest everything should be zero + */ /* Depending on filter action */ union { @@ -84,12 +99,16 @@ struct ice_fltr_info { * queue id in case of ICE_FWD_TO_QGRP. */ u16 q_id:11; - u16 vsi_id:10; + u16 hw_vsi_id:10; u16 vsi_list_id:10; } fwd_id; + /* Sw VSI handle */ + u16 vsi_handle; + /* Set to num_queues if action is ICE_FWD_TO_QGRP. This field - * determines the range of queues the packet needs to be forwarded to + * determines the range of queues the packet needs to be forwarded to. + * Note that qgrp_size must be set to a power of 2. 
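To make the two notes above concrete (zero l_data first, qgrp_size a power of 2), here is a small sketch of filling an ice_fltr_info for an ICE_FWD_TO_QGRP action; the helper name and the queue-group size of 8 are illustrative assumptions, not code from this patch.

/* Hedged sketch: forward matching MAC traffic to a group of eight queues
 * starting at base_q. memset() clears l_data and all other fields first;
 * 8 satisfies the power-of-2 requirement on qgrp_size.
 */
static void
ice_sketch_fill_qgrp_fltr(struct ice_fltr_info *fi, const u8 *mac, u16 base_q)
{
	memset(fi, 0, sizeof(*fi));
	fi->lkup_type = ICE_SW_LKUP_MAC;
	fi->fltr_act = ICE_FWD_TO_QGRP;
	ether_addr_copy(fi->l_data.mac.mac_addr, mac);
	fi->fwd_id.q_id = base_q;
	fi->qgrp_size = 8;	/* must be a power of 2 */
}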
*/ u8 qgrp_size; @@ -98,29 +117,52 @@ struct ice_fltr_info { u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ }; +struct ice_sw_recipe { + struct list_head l_entry; + + /* To protect modification of filt_rule list + * defined below + */ + struct mutex filt_rule_lock; + + /* List of type ice_fltr_mgmt_list_entry */ + struct list_head filt_rules; + struct list_head filt_replay_rules; + + /* linked list of type recipe_list_entry */ + struct list_head rg_list; + /* linked list of type ice_sw_fv_list_entry*/ + struct list_head fv_list; + struct ice_aqc_recipe_data_elem *r_buf; + u8 recp_count; + u8 root_rid; + u8 num_profs; + u8 *prof_ids; + + /* recipe bitmap: what all recipes makes this recipe */ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); +}; + /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ struct ice_vsi_list_map_info { struct list_head list_entry; DECLARE_BITMAP(vsi_map, ICE_MAX_VSI); u16 vsi_list_id; -}; - -enum ice_sw_fltr_status { - ICE_FLTR_STATUS_NEW = 0, - ICE_FLTR_STATUS_FW_SUCCESS, - ICE_FLTR_STATUS_FW_FAIL, + /* counter to track how many rules are reusing this VSI list */ + u16 ref_cnt; }; struct ice_fltr_list_entry { struct list_head list_entry; - enum ice_sw_fltr_status status; + enum ice_status status; struct ice_fltr_info fltr_info; }; /* This defines an entry in the list that maintains MAC or VLAN membership * to HW list mapping, since multiple VSIs can subscribe to the same MAC or * VLAN. As an optimization the VSI list should be created only when a - * second VSI becomes a subscriber to the VLAN address. + * second VSI becomes a subscriber to the same MAC address. VSI lists are always + * used for VLAN membership. */ struct ice_fltr_mgmt_list_entry { /* back pointer to VSI list id to VSI list mapping */ @@ -138,24 +180,33 @@ struct ice_fltr_mgmt_list_entry { /* VSI related commands */ enum ice_status -ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - struct ice_sq_cd *cd); +ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); enum ice_status -ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - struct ice_sq_cd *cd); +ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd); enum ice_status -ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - bool keep_vsi_alloc, struct ice_sq_cd *cd); - +ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); +struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); /* Switch/bridge related commands */ +enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); -void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id); +void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction); +ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); + +enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); +u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 
vsi_handle); +bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); + +enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); +void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 6481e3d86374..5dae968d853e 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -251,6 +251,7 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; + tx_ring->tx_stats.prev_pkt = -1; return 0; err: diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 31bc998fe200..1d0f58bd389b 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -71,6 +71,7 @@ struct ice_txq_stats { u64 restart_q; u64 tx_busy; u64 tx_linearize; + int prev_pkt; /* negative if no pending Tx descriptors */ }; struct ice_rxq_stats { @@ -103,10 +104,17 @@ enum ice_rx_dtype { #define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_TX_ITR ICE_IDX_ITR1 #define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ -#define ICE_ITR_8K 0x003E +#define ICE_ITR_8K 125 +#define ICE_ITR_20K 50 +#define ICE_DFLT_TX_ITR ICE_ITR_20K +#define ICE_DFLT_RX_ITR ICE_ITR_20K +/* apply ITR granularity translation to program the register. itr_gran is either + * 2 or 4 usecs so we need to divide by 2 first then shift by that value + */ +#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \ + ((itr_gran) / 2)) -/* apply ITR HW granularity translation to program the HW registers */ -#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran)) +#define ICE_DFLT_INTRL 0 /* Legacy or Advanced Mode Queue */ #define ICE_TX_ADVANCED 0 @@ -128,14 +136,6 @@ struct ice_ring { u16 q_index; /* Queue number of ring */ u32 txq_teid; /* Added Tx queue TEID */ - /* high bit set means dynamic, use accessor routines to read/write. - * hardware supports 2us/1us resolution for the ITR registers. - * these values always store the USER setting, and must be converted - * before programming to a register. 
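A quick arithmetic check of the ITR_TO_REG() macro introduced above (editorial sketch, not patch content): with the default 50 usec ITR and a 2 usec granularity the programmed value is 50 >> (2 / 2) = 25, i.e. 25 * 2 us = 50 us; with a 4 usec granularity it is 50 >> 2 = 12, i.e. 48 us after truncation.

/* Hedged sketch: compute the register value for the default Rx ITR given
 * the per-speed granularity (2 or 4 usecs per the comment above).
 */
static u16 ice_sketch_dflt_rx_itr_reg(u8 itr_gran)
{
	return ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);	/* 25 for 2 us, 12 for 4 us */
}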
- */ - u16 rx_itr_setting; - u16 tx_itr_setting; - u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ @@ -172,6 +172,7 @@ struct ice_ring_container { unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_pkts; /* total packets processed this int */ enum ice_latency_range latency_range; + int itr_idx; /* index in the interrupt vector */ u16 itr; }; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 97c366e0ca59..12f9432abf11 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -18,6 +18,9 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) return test_bit(tc, (unsigned long *)&bitmap); } +/* Driver always calls main vsi_handle first */ +#define ICE_MAIN_VSI_HANDLE 0 + /* debug masks - set these bits in hw->debug_mask to control output */ #define ICE_DBG_INIT BIT_ULL(1) #define ICE_DBG_LINK BIT_ULL(4) @@ -34,10 +37,15 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) enum ice_aq_res_ids { ICE_NVM_RES_ID = 1, ICE_SPD_RES_ID, - ICE_GLOBAL_CFG_LOCK_RES_ID, - ICE_CHANGE_LOCK_RES_ID + ICE_CHANGE_LOCK_RES_ID, + ICE_GLOBAL_CFG_LOCK_RES_ID }; +/* FW update timeout definitions are in milliseconds */ +#define ICE_NVM_TIMEOUT 180000 +#define ICE_CHANGE_LOCK_TIMEOUT 1000 +#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000 + enum ice_aq_res_access_type { ICE_RES_READ = 1, ICE_RES_WRITE @@ -76,6 +84,7 @@ enum ice_media_type { enum ice_vsi_type { ICE_VSI_PF = 0, + ICE_VSI_VF, }; struct ice_link_status { @@ -95,6 +104,15 @@ struct ice_link_status { u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; }; +/* Different reset sources for which a disable queue AQ call has to be made in + * order to clean the TX scheduler as a part of the reset + */ +enum ice_disq_rst_src { + ICE_NO_RESET = 0, + ICE_VM_RESET, + ICE_VF_RESET, +}; + /* PHY info such as phy_type, etc... */ struct ice_phy_info { struct ice_link_status link_info; @@ -119,6 +137,9 @@ struct ice_hw_common_caps { /* Max MTU for function or device */ u16 max_mtu; + /* Virtualization support */ + u8 sr_iov_1_1; /* SR-IOV enabled */ + /* RSS related capabilities */ u16 rss_table_size; /* 512 for PFs and 64 for VFs */ u8 rss_table_entry_width; /* RSS Entry width in bits */ @@ -127,12 +148,15 @@ struct ice_hw_common_caps { /* Function specific capabilities */ struct ice_hw_func_caps { struct ice_hw_common_caps common_cap; + u32 num_allocd_vfs; /* Number of allocated VFs */ + u32 vf_base_id; /* Logical ID of the first VF */ u32 guaranteed_num_vsi; }; /* Device wide capabilities */ struct ice_hw_dev_caps { struct ice_hw_common_caps common_cap; + u32 num_vfs_exposed; /* Total number of VFs exposed */ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ }; @@ -142,11 +166,18 @@ struct ice_mac_info { u8 perm_addr[ETH_ALEN]; }; -/* Various RESET request, These are not tied with HW reset types */ +/* Reset types used to determine which kind of reset was requested. These + * defines match what the RESET_TYPE field of the GLGEN_RSTAT register. + * ICE_RESET_PFR does not match any RESET_TYPE field in the GLGEN_RSTAT register + * because its reset source is different than the other types listed. 
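For reference, a sketch of how these enum values could be read back from hardware; the GLGEN_RSTAT register offset and its RESET_TYPE shift/mask macros are assumed to live in the autogenerated register header, which is not shown in this diff.

/* Hedged sketch: decode the RESET_TYPE field of GLGEN_RSTAT into
 * enum ice_reset_req. The register and field macros are assumptions here.
 */
static enum ice_reset_req ice_sketch_read_reset_type(struct ice_hw *hw)
{
	u32 rstat = rd32(hw, GLGEN_RSTAT);

	return (enum ice_reset_req)((rstat & GLGEN_RSTAT_RESET_TYPE_M) >>
				    GLGEN_RSTAT_RESET_TYPE_S);
}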
+ */ enum ice_reset_req { - ICE_RESET_PFR = 0, + ICE_RESET_POR = 0, + ICE_RESET_INVAL = 0, ICE_RESET_CORER = 1, ICE_RESET_GLOBR = 2, + ICE_RESET_EMPR = 3, + ICE_RESET_PFR = 4, }; /* Bus parameters */ @@ -180,7 +211,7 @@ struct ice_sched_node { struct ice_sched_node **children; struct ice_aqc_txsched_elem_data info; u32 agg_id; /* aggregator group id */ - u16 vsi_id; + u16 vsi_handle; u8 in_use; /* suspended or in use */ u8 tx_sched_layer; /* Logical Layer (1-9) */ u8 num_children; @@ -204,6 +235,7 @@ enum ice_agg_type { }; #define ICE_SCHED_DFLT_RL_PROF_ID 0 +#define ICE_SCHED_DFLT_BW_WT 1 /* vsi type list entry to locate corresponding vsi/ag nodes */ struct ice_sched_vsi_info { @@ -238,8 +270,6 @@ struct ice_port_info { struct ice_mac_info mac; struct ice_phy_info phy; struct mutex sched_lock; /* protect access to TXSched tree */ - struct ice_sched_tx_policy sched_policy; - struct list_head vsi_info_list; struct list_head agg_list; /* lists all aggregator */ u8 lport; #define ICE_LPORT_MASK 0xff @@ -247,19 +277,26 @@ struct ice_port_info { }; struct ice_switch_info { - /* Switch VSI lists to MAC/VLAN translation */ - struct mutex mac_list_lock; /* protect MAC list */ - struct list_head mac_list_head; - struct mutex vlan_list_lock; /* protect VLAN list */ - struct list_head vlan_list_head; - struct mutex eth_m_list_lock; /* protect ethtype list */ - struct list_head eth_m_list_head; - struct mutex promisc_list_lock; /* protect promisc mode list */ - struct list_head promisc_list_head; - struct mutex mac_vlan_list_lock; /* protect MAC-VLAN list */ - struct list_head mac_vlan_list_head; - struct list_head vsi_list_map_head; + struct ice_sw_recipe *recp_list; +}; + +/* FW logging configuration */ +struct ice_fw_log_evnt { + u8 cfg : 4; /* New event enables to configure */ + u8 cur : 4; /* Current/active event enables */ +}; + +struct ice_fw_log_cfg { + u8 cq_en : 1; /* FW logging is enabled via the control queue */ + u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */ + u8 actv_evnts; /* Cumulation of currently enabled log events */ + +#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S) +#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S) +#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S) +#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S) + struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX]; }; /* Port hardware description */ @@ -286,8 +323,11 @@ struct ice_hw { u8 flattened_layers; u8 max_cgds; u8 sw_entry_point_layer; + u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM]; + struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI]; u8 evb_veb; /* true for VEB, false for VEPA */ + u8 reset_ongoing; /* true if hw is in reset, false otherwise */ struct ice_bus_info bus; struct ice_nvm_info nvm; struct ice_hw_dev_caps dev_caps; /* device capabilities */ @@ -297,6 +337,7 @@ struct ice_hw { /* Control Queue info */ struct ice_ctl_q_info adminq; + struct ice_ctl_q_info mailboxq; u8 api_branch; /* API branch version */ u8 api_maj_ver; /* API major version */ @@ -308,16 +349,27 @@ struct ice_hw { u8 fw_patch; /* firmware patch version */ u32 fw_build; /* firmware build number */ - /* minimum allowed value for different speeds */ -#define ICE_ITR_GRAN_MIN_200 1 -#define ICE_ITR_GRAN_MIN_100 1 -#define ICE_ITR_GRAN_MIN_50 2 -#define ICE_ITR_GRAN_MIN_25 4 + struct ice_fw_log_cfg fw_log; + +/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL + * register. 
Used for determining the itr/intrl granularity during + * initialization. + */ +#define ICE_MAX_AGG_BW_200G 0x0 +#define ICE_MAX_AGG_BW_100G 0X1 +#define ICE_MAX_AGG_BW_50G 0x2 +#define ICE_MAX_AGG_BW_25G 0x3 + /* ITR granularity for different speeds */ +#define ICE_ITR_GRAN_ABOVE_25 2 +#define ICE_ITR_GRAN_MAX_25 4 /* ITR granularity in 1 us */ - u8 itr_gran_200; - u8 itr_gran_100; - u8 itr_gran_50; - u8 itr_gran_25; + u8 itr_gran; + /* INTRL granularity for different speeds */ +#define ICE_INTRL_GRAN_ABOVE_25 4 +#define ICE_INTRL_GRAN_MAX_25 8 + /* INTRL granularity in 1 us */ + u8 intrl_gran; + u8 ucast_shared; /* true if VSIs can share unicast addr */ }; @@ -391,4 +443,7 @@ struct ice_hw_port_stats { #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 #define ICE_SR_WORDS_IN_1KB 512 +/* Hash redirection LUT for VSI - maximum array size */ +#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4) + #endif /* _ICE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c new file mode 100644 index 000000000000..c25e486706f3 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -0,0 +1,2668 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" + +/** + * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF + * @pf: pointer to the PF structure + * @v_opcode: operation code + * @v_retval: return value + * @msg: pointer to the msg buffer + * @msglen: msg length + */ +static void +ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, + enum ice_status v_retval, u8 *msg, u16 msglen) +{ + struct ice_hw *hw = &pf->hw; + struct ice_vf *vf = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + /* Not all vfs are enabled so skip the ones that are not */ + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && + !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + continue; + + /* Ignore return value on purpose - a given VF may fail, but + * we need to keep going and send to all of them + */ + ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg, + msglen, NULL); + } +} + +/** + * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event + * @vf: pointer to the VF structure + * @pfe: pointer to the virtchnl_pf_event to set link speed/status for + * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_* + * @link_up: whether or not to set the link up/down + */ +static void +ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, + int ice_link_speed, bool link_up) +{ + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { + pfe->event_data.link_event_adv.link_status = link_up; + /* Speed in Mbps */ + pfe->event_data.link_event_adv.link_speed = + ice_conv_link_speed_to_virtchnl(true, ice_link_speed); + } else { + pfe->event_data.link_event.link_status = link_up; + /* Legacy method for virtchnl link speeds */ + pfe->event_data.link_event.link_speed = + (enum virtchnl_link_speed) + ice_conv_link_speed_to_virtchnl(false, ice_link_speed); + } +} + +/** + * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status + * @vf: pointer to the VF structure + * @pfe: pointer to the virtchnl_pf_event to set link speed/status for + * @link_up: whether or not to set the link up/down + */ +static void +ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe, + bool link_up) +{ + u16 link_speed; + + if (link_up) + link_speed = ICE_AQ_LINK_SPEED_40GB; + else + 
link_speed = ICE_AQ_LINK_SPEED_UNKNOWN; + + ice_set_pfe_link(vf, pfe, link_speed, link_up); +} + +/** + * ice_vc_notify_vf_link_state - Inform a VF of link status + * @vf: pointer to the VF structure + * + * send a link status message to a single VF + */ +static void ice_vc_notify_vf_link_state(struct ice_vf *vf) +{ + struct virtchnl_pf_event pfe = { 0 }; + struct ice_link_status *ls; + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; + + hw = &pf->hw; + ls = &hw->port_info->phy.link_info; + + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; + + if (vf->link_forced) + ice_set_pfe_link_forced(vf, &pfe, vf->link_up); + else + ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & + ICE_AQ_LINK_UP); + + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + sizeof(pfe), NULL); +} + +/** + * ice_get_vf_vector - get VF interrupt vector register offset + * @vf_msix: number of MSIx vector per VF on a PF + * @vf_id: VF identifier + * @i: index of MSIx vector + */ +static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i) +{ + return ((i == 0) ? VFINT_DYN_CTLN(vf_id) : + VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1))); +} + +/** + * ice_free_vf_res - Free a VF's resources + * @vf: pointer to the VF info + */ +static void ice_free_vf_res(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + int i, pf_vf_msix; + + /* First, disable VF's configuration API to prevent OS from + * accessing the VF's VSI after it's freed or invalidated. + */ + clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + + /* free vsi & disconnect it from the parent uplink */ + if (vf->lan_vsi_idx) { + ice_vsi_release(pf->vsi[vf->lan_vsi_idx]); + vf->lan_vsi_idx = 0; + vf->lan_vsi_num = 0; + vf->num_mac = 0; + } + + pf_vf_msix = pf->num_vf_msix; + /* Disable interrupts so that VF starts in a known state */ + for (i = 0; i < pf_vf_msix; i++) { + u32 reg_idx; + + reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i); + wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M); + ice_flush(&pf->hw); + } + /* reset some of the state variables keeping track of the resources */ + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); +} + +/***********************enable_vf routines*****************************/ + +/** + * ice_dis_vf_mappings + * @vf: pointer to the VF structure + */ +static void ice_dis_vf_mappings(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int first, last, v; + struct ice_hw *hw; + + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; + + wr32(hw, VPINT_ALLOC(vf->vf_id), 0); + + first = vf->first_vector_idx; + last = first + pf->num_vf_msix - 1; + for (v = first; v <= last; v++) { + u32 reg; + + reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & + GLINT_VECT2FUNC_IS_PF_M) | + ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & + GLINT_VECT2FUNC_PF_NUM_M)); + wr32(hw, GLINT_VECT2FUNC(v), reg); + } + + if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) + wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); + else + dev_err(&pf->pdev->dev, + "Scattered mode for VF Tx queues is not yet implemented\n"); + + if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) + wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); + else + dev_err(&pf->pdev->dev, + "Scattered mode for VF Rx queues is not yet implemented\n"); +} + +/** + * ice_free_vfs - Free all VFs + * @pf: pointer to the PF structure + */ +void ice_free_vfs(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int tmp, i; + + if (!pf->vf) + return; + + while (test_and_set_bit(__ICE_VF_DIS, 
pf->state)) + usleep_range(1000, 2000); + + /* Avoid wait time by stopping all VFs at the same time */ + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) + continue; + + /* stop rings without wait time */ + ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx], + ICE_NO_RESET, i); + ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]); + + clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); + } + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. + */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + + tmp = pf->num_alloc_vfs; + pf->num_vf_qps = 0; + pf->num_alloc_vfs = 0; + for (i = 0; i < tmp; i++) { + if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { + /* disable VF qp mappings */ + ice_dis_vf_mappings(&pf->vf[i]); + + /* Set this state so that assigned VF vectors can be + * reclaimed by PF for reuse in ice_vsi_release(). No + * need to clear this bit since pf->vf array is being + * freed anyways after this for loop + */ + set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states); + ice_free_vf_res(&pf->vf[i]); + } + } + + devm_kfree(&pf->pdev->dev, pf->vf); + pf->vf = NULL; + + /* This check is for when the driver is unloaded while VFs are + * assigned. Setting the number of VFs to 0 through sysfs is caught + * before this function ever gets called. + */ + if (!pci_vfs_assigned(pf->pdev)) { + int vf_id; + + /* Acknowledge VFLR for all VFs. Without this, VFs will fail to + * work correctly when SR-IOV gets re-enabled. + */ + for (vf_id = 0; vf_id < tmp; vf_id++) { + u32 reg_idx, bit_idx; + + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + } + } + clear_bit(__ICE_VF_DIS, pf->state); + clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); +} + +/** + * ice_trigger_vf_reset - Reset a VF on HW + * @vf: pointer to the VF structure + * @is_vflr: true if VFLR was issued, false if not + * + * Trigger hardware to start a reset for a particular VF. Expects the caller + * to wait the proper amount of time to allow hardware to reset the VF before + * it cleans up and restores VF functionality. + */ +static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) +{ + struct ice_pf *pf = vf->pf; + u32 reg, reg_idx, bit_idx; + struct ice_hw *hw; + int vf_abs_id, i; + + hw = &pf->hw; + vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; + + /* Inform VF that it is no longer active, as a warning */ + clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); + + /* Disable VF's configuration API during reset. The flag is re-enabled + * in ice_alloc_vf_res(), when it's safe again to access VF's VSI. + * It's normally disabled in ice_free_vf_res(), but it's safer + * to do it earlier to give some time to finish to any VF config + * functions that may still be running at this point. + */ + clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + + /* In the case of a VFLR, the HW has already reset the VF and we + * just need to clean up, so don't hit the VFRTRIG register. 
+ */ + if (!is_vflr) { + /* reset VF using VPGEN_VFRTRIG reg */ + reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); + reg |= VPGEN_VFRTRIG_VFSWR_M; + wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); + } + /* clear the VFLR bit in GLGEN_VFLRSTAT */ + reg_idx = (vf_abs_id) / 32; + bit_idx = (vf_abs_id) % 32; + wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + ice_flush(hw); + + wr32(hw, PF_PCI_CIAA, + VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); + for (i = 0; i < 100; i++) { + reg = rd32(hw, PF_PCI_CIAD); + if ((reg & VF_TRANS_PENDING_M) != 0) + dev_err(&pf->pdev->dev, + "VF %d PCI transactions stuck\n", vf->vf_id); + udelay(1); + } +} + +/** + * ice_vsi_set_pvid - Set port VLAN id for the VSI + * @vsi: the VSI being changed + * @vid: the VLAN id to set as a PVID + */ +static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED | + ICE_AQ_VSI_PVLAN_INSERT_PVID | + ICE_AQ_VSI_VLAN_EMOD_STR; + ctxt.info.pvid = cpu_to_le16(vid); + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + + status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (status) { + dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + return -EIO; + } + + vsi->info.pvid = ctxt.info.pvid; + vsi->info.vlan_flags = ctxt.info.vlan_flags; + return 0; +} + +/** + * ice_vsi_kill_pvid - Remove port VLAN id from the VSI + * @vsi: the VSI being changed + */ +static int ice_vsi_kill_pvid(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + + if (ice_vsi_manage_vlan_stripping(vsi, false)) { + dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n", + vsi->vsi_num); + return -ENODEV; + } + + vsi->info.pvid = 0; + return 0; +} + +/** + * ice_vf_vsi_setup - Set up a VF VSI + * @pf: board private structure + * @pi: pointer to the port_info instance + * @vf_id: defines VF id to which this VSI connects. + * + * Returns pointer to the successfully allocated VSI struct on success, + * otherwise returns NULL on failure. + */ +static struct ice_vsi * +ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id); +} + +/** + * ice_alloc_vsi_res - Setup VF VSI and its resources + * @vf: pointer to the VF structure + * + * Returns 0 on success, negative value on failure + */ +static int ice_alloc_vsi_res(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + LIST_HEAD(tmp_add_list); + u8 broadcast[ETH_ALEN]; + struct ice_vsi *vsi; + int status = 0; + + vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id); + + if (!vsi) { + dev_err(&pf->pdev->dev, "Failed to create VF VSI\n"); + return -ENOMEM; + } + + vf->lan_vsi_idx = vsi->idx; + vf->lan_vsi_num = vsi->vsi_num; + + /* first vector index is the VFs OICR index */ + vf->first_vector_idx = vsi->hw_base_vector; + /* Since hw_base_vector holds the vector where data queue interrupts + * starts, increment by 1 since VFs allocated vectors include OICR intr + * as well. 
+ */ + vsi->hw_base_vector += 1; + + /* Check if port VLAN exist before, and restore it accordingly */ + if (vf->port_vlan_id) + ice_vsi_set_pvid(vsi, vf->port_vlan_id); + + eth_broadcast_addr(broadcast); + + status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); + if (status) + goto ice_alloc_vsi_res_exit; + + if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) { + status = ice_add_mac_to_list(vsi, &tmp_add_list, + vf->dflt_lan_addr.addr); + if (status) + goto ice_alloc_vsi_res_exit; + } + + status = ice_add_mac(&pf->hw, &tmp_add_list); + if (status) + dev_err(&pf->pdev->dev, "could not add mac filters\n"); + + /* Clear this bit after VF initialization since we shouldn't reclaim + * and reassign interrupts for synchronous or asynchronous VFR events. + * We don't want to reconfigure interrupts since AVF driver doesn't + * expect vector assignment to be changed unless there is a request for + * more vectors. + */ + clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states); +ice_alloc_vsi_res_exit: + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return status; +} + +/** + * ice_alloc_vf_res - Allocate VF resources + * @vf: pointer to the VF structure + */ +static int ice_alloc_vf_res(struct ice_vf *vf) +{ + int status; + + /* setup VF VSI and necessary resources */ + status = ice_alloc_vsi_res(vf); + if (status) + goto ice_alloc_vf_res_exit; + + if (vf->trusted) + set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + else + clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + + /* VF is now completely initialized */ + set_bit(ICE_VF_STATE_INIT, vf->vf_states); + + return status; + +ice_alloc_vf_res_exit: + ice_free_vf_res(vf); + return status; +} + +/** + * ice_ena_vf_mappings + * @vf: pointer to the VF structure + * + * Enable VF vectors and queues allocation by writing the details into + * respective registers. + */ +static void ice_ena_vf_mappings(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int first, last, v; + struct ice_hw *hw; + int abs_vf_id; + u32 reg; + + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; + first = vf->first_vector_idx; + last = (first + pf->num_vf_msix) - 1; + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + /* VF Vector allocation */ + reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | + ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | + VPINT_ALLOC_VALID_M); + wr32(hw, VPINT_ALLOC(vf->vf_id), reg); + + /* map the interrupts to its functions */ + for (v = first; v <= last; v++) { + reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & + GLINT_VECT2FUNC_VF_NUM_M) | + ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & + GLINT_VECT2FUNC_PF_NUM_M)); + wr32(hw, GLINT_VECT2FUNC(v), reg); + } + + /* VF Tx queues allocation */ + if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) { + wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), + VPLAN_TXQ_MAPENA_TX_ENA_M); + /* set the VF PF Tx queue range + * VFNUMQ value should be set to (number of queues - 1). 
A value + * of 0 means 1 queue and a value of 255 means 256 queues + */ + reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) & + VPLAN_TX_QBASE_VFFIRSTQ_M) | + (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & + VPLAN_TX_QBASE_VFNUMQ_M)); + wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); + } else { + dev_err(&pf->pdev->dev, + "Scattered mode for VF Tx queues is not yet implemented\n"); + } + + /* VF Rx queues allocation */ + if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) { + wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), + VPLAN_RXQ_MAPENA_RX_ENA_M); + /* set the VF PF Rx queue range + * VFNUMQ value should be set to (number of queues - 1). A value + * of 0 means 1 queue and a value of 255 means 256 queues + */ + reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) & + VPLAN_RX_QBASE_VFFIRSTQ_M) | + (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & + VPLAN_RX_QBASE_VFNUMQ_M)); + wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); + } else { + dev_err(&pf->pdev->dev, + "Scattered mode for VF Rx queues is not yet implemented\n"); + } +} + +/** + * ice_determine_res + * @pf: pointer to the PF structure + * @avail_res: available resources in the PF structure + * @max_res: maximum resources that can be given per VF + * @min_res: minimum resources that can be given per VF + * + * Returns non-zero value if resources (queues/vectors) are available or + * returns zero if PF cannot accommodate for all num_alloc_vfs. + */ +static int +ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res) +{ + bool checked_min_res = false; + int res; + + /* start by checking if PF can assign max number of resources for + * all num_alloc_vfs. + * if yes, return number per VF + * If no, divide by 2 and roundup, check again + * repeat the loop till we reach a point where even minimum resources + * are not available, in that case return 0 + */ + res = max_res; + while ((res >= min_res) && !checked_min_res) { + int num_all_res; + + num_all_res = pf->num_alloc_vfs * res; + if (num_all_res <= avail_res) + return res; + + if (res == min_res) + checked_min_res = true; + + res = DIV_ROUND_UP(res, 2); + } + return 0; +} + +/** + * ice_check_avail_res - check if vectors and queues are available + * @pf: pointer to the PF structure + * + * This function is where we calculate actual number of resources for VF VSIs, + * we don't reserve ahead of time during probe. Returns success if vectors and + * queues resources are available, otherwise returns error code + */ +static int ice_check_avail_res(struct ice_pf *pf) +{ + u16 num_msix, num_txq, num_rxq; + + if (!pf->num_alloc_vfs) + return -EINVAL; + + /* Grab from HW interrupts common pool + * Note: By the time the user decides it needs more vectors in a VF + * its already too late since one must decide this prior to creating the + * VF interface. So the best we can do is take a guess as to what the + * user might want. + * + * We have two policies for vector allocation: + * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small + * number of NFV VFs used for NFV appliances, since this is a special + * case, we try to assign maximum vectors per VF (65) as much as + * possible, based on determine_resources algorithm. + * 2. if num_alloc_vfs is from 17 to 256, then its large number of + * regular VFs which are not used for any special purpose. Hence try to + * grab default interrupt vectors (5 as supported by AVF driver). 
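A worked example of the halving search in ice_determine_res() under the small-VF-count policy above, with assumed numbers (16 VFs, 400 available MSI-X vectors, max 65, min 3 per VF): 65 * 16 = 1040 > 400, then DIV_ROUND_UP(65, 2) = 33 and 33 * 16 = 528 > 400, then 17 * 16 = 272 <= 400, so each VF gets 17 vectors. The snippet below is an editorial sketch, not patch content.

/* Hedged sketch: assumes pf->num_alloc_vfs == 16 and uses 3 as a stand-in
 * for ICE_MIN_INTR_PER_VF, whose value is defined elsewhere.
 */
static int ice_sketch_msix_per_vf(struct ice_pf *pf)
{
	return ice_determine_res(pf, 400, 65, 3);	/* 17 with the numbers above */
}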
+ */ + if (pf->num_alloc_vfs <= 16) { + num_msix = ice_determine_res(pf, pf->num_avail_hw_msix, + ICE_MAX_INTR_PER_VF, + ICE_MIN_INTR_PER_VF); + } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) { + num_msix = ice_determine_res(pf, pf->num_avail_hw_msix, + ICE_DFLT_INTR_PER_VF, + ICE_MIN_INTR_PER_VF); + } else { + dev_err(&pf->pdev->dev, + "Number of VFs %d exceeds max VF count %d\n", + pf->num_alloc_vfs, ICE_MAX_VF_COUNT); + return -EIO; + } + + if (!num_msix) + return -EIO; + + /* Grab from the common pool + * start by requesting Default queues (4 as supported by AVF driver), + * Note that, the main difference between queues and vectors is, latter + * can only be reserved at init time but queues can be requested by VF + * at runtime through Virtchnl, that is the reason we start by reserving + * few queues. + */ + num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF, + ICE_MIN_QS_PER_VF); + + num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF, + ICE_MIN_QS_PER_VF); + + if (!num_txq || !num_rxq) + return -EIO; + + /* since AVF driver works with only queue pairs which means, it expects + * to have equal number of Rx and Tx queues, so take the minimum of + * available Tx or Rx queues + */ + pf->num_vf_qps = min_t(int, num_txq, num_rxq); + pf->num_vf_msix = num_msix; + + return 0; +} + +/** + * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset + * @vf: pointer to the VF structure + * + * Cleanup a VF after the hardware reset is finished. Expects the caller to + * have verified whether the reset is finished properly, and ensure the + * minimum amount of wait time has passed. Reallocate VF resources back to make + * VF state active + */ +static void ice_cleanup_and_realloc_vf(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; + u32 reg; + + hw = &pf->hw; + + /* PF software completes the flow by notifying VF that reset flow is + * completed. This is done by enabling hardware by clearing the reset + * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT + * register to VFR completed (done at the end of this function) + * By doing this we allow HW to access VF memory at any point. If we + * did it any sooner, HW could access memory while it was being freed + * in ice_free_vf_res(), causing an IOMMU fault. + * + * On the other hand, this needs to be done ASAP, because the VF driver + * is waiting for this to happen and may report a timeout. It's + * harmless, but it gets logged into Guest OS kernel log, so best avoid + * it. + */ + reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); + reg &= ~VPGEN_VFRTRIG_VFSWR_M; + wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); + + /* reallocate VF resources to finish resetting the VSI state */ + if (!ice_alloc_vf_res(vf)) { + ice_ena_vf_mappings(vf); + set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); + clear_bit(ICE_VF_STATE_DIS, vf->vf_states); + vf->num_vlan = 0; + } + + /* Tell the VF driver the reset is done. This needs to be done only + * after VF has been fully initialized, because the VF driver may + * request resources immediately after setting this flag. + */ + wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); +} + +/** + * ice_reset_all_vfs - reset all allocated VFs in one go + * @pf: pointer to the PF structure + * @is_vflr: true if VFLR was issued, false if not + * + * First, tell the hardware to reset each VF, then do all the waiting in one + * chunk, and finally finish restoring each VF after the wait. 
This is useful + * during PF routines which need to reset all VFs, as otherwise it must perform + * these resets in a serialized fashion. + * + * Returns true if any VFs were reset, and false otherwise. + */ +bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) +{ + struct ice_hw *hw = &pf->hw; + int v, i; + + /* If we don't have any VFs, then there is nothing to reset */ + if (!pf->num_alloc_vfs) + return false; + + /* If VFs have been disabled, there is no need to reset */ + if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + return false; + + /* Begin reset on all VFs at once */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_trigger_vf_reset(&pf->vf[v], is_vflr); + + /* Call Disable LAN Tx queue AQ call with VFR bit set and 0 + * queues to inform Firmware about VF reset. + */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL, + ICE_VF_RESET, v, NULL); + + /* HW requires some time to make sure it can flush the FIFO for a VF + * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in + * sequence to make sure that it has completed. We'll keep track of + * the VFs using a simple iterator that increments once that VF has + * finished resetting. + */ + for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { + usleep_range(10000, 20000); + + /* Check each VF in sequence */ + while (v < pf->num_alloc_vfs) { + struct ice_vf *vf = &pf->vf[v]; + u32 reg; + + reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); + if (!(reg & VPGEN_VFRSTAT_VFRD_M)) + break; + + /* If the current VF has finished resetting, move on + * to the next VF in sequence. + */ + v++; + } + } + + /* Display a warning if at least one VF didn't manage to reset in + * time, but continue on with the operation. + */ + if (v < pf->num_alloc_vfs) + dev_warn(&pf->pdev->dev, "VF reset check timeout\n"); + usleep_range(10000, 20000); + + /* free VF resources to begin resetting the VSI state */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_free_vf_res(&pf->vf[v]); + + if (ice_check_avail_res(pf)) { + dev_err(&pf->pdev->dev, + "Cannot allocate VF resources, try with fewer number of VFs\n"); + return false; + } + + /* Finish the reset on each VF */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_cleanup_and_realloc_vf(&pf->vf[v]); + + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); + + return true; +} + +/** + * ice_reset_vf - Reset a particular VF + * @vf: pointer to the VF structure + * @is_vflr: true if VFLR was issued, false if not + * + * Returns true if the VF is reset, false otherwise. + */ +static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) +{ + struct ice_pf *pf = vf->pf; + struct ice_hw *hw = &pf->hw; + bool rsd = false; + u32 reg; + int i; + + /* If the VFs have been disabled, this means something else is + * resetting the VF, so we shouldn't continue. + */ + if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + return false; + + ice_trigger_vf_reset(vf, is_vflr); + + if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET, + vf->vf_id); + ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]); + clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + } else { + /* Call Disable LAN Tx queue AQ call even when queues are not + * enabled. 
This is needed for successful completion of VFR + */ + ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0, + NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL); + } + + /* poll VPGEN_VFRSTAT reg to make sure + * that reset is complete + */ + for (i = 0; i < 10; i++) { + /* VF reset requires driver to first reset the VF and then + * poll the status register to make sure that the reset + * completed successfully. + */ + usleep_range(10000, 20000); + reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); + if (reg & VPGEN_VFRSTAT_VFRD_M) { + rsd = true; + break; + } + } + + /* Display a warning if the VF didn't manage to reset in time, but + * continue on with the operation. + */ + if (!rsd) + dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n", + vf->vf_id); + + usleep_range(10000, 20000); + + /* free VF resources to begin resetting the VSI state */ + ice_free_vf_res(vf); + + ice_cleanup_and_realloc_vf(vf); + + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); + + return true; +} + +/** + * ice_vc_notify_link_state - Inform all VFs on a PF of link status + * @pf: pointer to the PF structure + */ +void ice_vc_notify_link_state(struct ice_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++) + ice_vc_notify_vf_link_state(&pf->vf[i]); +} + +/** + * ice_vc_notify_reset - Send pending reset message to all VFs + * @pf: pointer to the PF structure + * + * indicate a pending reset to all VFs on a given PF + */ +void ice_vc_notify_reset(struct ice_pf *pf) +{ + struct virtchnl_pf_event pfe; + + if (!pf->num_alloc_vfs) + return; + + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; + ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS, + (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); +} + +/** + * ice_vc_notify_vf_reset - Notify VF of a reset event + * @vf: pointer to the VF structure + */ +static void ice_vc_notify_vf_reset(struct ice_vf *vf) +{ + struct virtchnl_pf_event pfe; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return; + + /* verify if the VF is in either init or active before proceeding */ + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && + !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + return; + + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; + ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, + (u8 *)&pfe, sizeof(pfe), NULL); +} + +/** + * ice_alloc_vfs - Allocate and set up VF resources + * @pf: pointer to the PF structure + * @num_alloc_vfs: number of VFs to allocate + */ +static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) +{ + struct ice_hw *hw = &pf->hw; + struct ice_vf *vfs; + int i, ret; + + /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ + wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx), + ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); + + ice_flush(hw); + + ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); + if (ret) { + pf->num_alloc_vfs = 0; + goto err_unroll_intr; + } + /* allocate memory */ + vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs), + GFP_KERNEL); + if (!vfs) { + ret = -ENOMEM; + goto err_unroll_sriov; + } + pf->vf = vfs; + + /* apply default profile */ + for (i = 0; i < num_alloc_vfs; i++) { + vfs[i].pf = pf; + vfs[i].vf_sw_id = pf->first_sw; + vfs[i].vf_id = i; + + /* assign default capabilities */ + set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); + vfs[i].spoofchk = true; + + /* Set this state so that PF driver does VF vector assignment */ + set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states); + } + pf->num_alloc_vfs = num_alloc_vfs; + + /* VF resources get allocated during reset */ + if (!ice_reset_all_vfs(pf, false)) + goto err_unroll_sriov; + + goto err_unroll_intr; + +err_unroll_sriov: + pci_disable_sriov(pf->pdev); +err_unroll_intr: + /* rearm interrupts here */ + ice_irq_dynamic_ena(hw, NULL, NULL); + return ret; +} + +/** + * ice_pf_state_is_nominal - checks the pf for nominal state + * @pf: pointer to pf to check + * + * Check the PF's state for a collection of bits that would indicate + * the PF is in a state that would inhibit normal operation for + * driver functionality. + * + * Returns true if PF is in a nominal state. + * Returns false otherwise + */ +static bool ice_pf_state_is_nominal(struct ice_pf *pf) +{ + DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 }; + + if (!pf) + return false; + + bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS); + if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS)) + return false; + + return true; +} + +/** + * ice_pci_sriov_ena - Enable or change number of VFs + * @pf: pointer to the PF structure + * @num_vfs: number of VFs to allocate + */ +static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) +{ + int pre_existing_vfs = pci_num_vf(pf->pdev); + struct device *dev = &pf->pdev->dev; + int err; + + if (!ice_pf_state_is_nominal(pf)) { + dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); + return -EBUSY; + } + + if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { + dev_err(dev, "This device is not capable of SR-IOV\n"); + return -ENODEV; + } + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + ice_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return num_vfs; + + if (num_vfs > pf->num_vfs_supported) { + dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", + num_vfs, pf->num_vfs_supported); + return -ENOTSUPP; + } + + dev_info(dev, "Allocating %d VFs\n", num_vfs); + err = ice_alloc_vfs(pf, num_vfs); + if (err) { + dev_err(dev, "Failed to enable SR-IOV: %d\n", err); + return err; + } + + set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); + return num_vfs; +} + +/** + * ice_sriov_configure - Enable or change number of VFs via sysfs + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * This function is called when the user updates the number of VFs in sysfs. 
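For orientation, the sysfs path into this handler is the standard .sriov_configure hook of struct pci_driver; the snippet below is an editorial sketch with placeholder fields, and the real hookup for the ice driver lives in ice_main.c, outside this hunk.

/* Hedged sketch: `echo N > /sys/bus/pci/devices/<BDF>/sriov_numvfs` ends up
 * invoking the callback registered here.
 */
static struct pci_driver ice_sketch_driver = {
	.name = KBUILD_MODNAME,
	.sriov_configure = ice_sriov_configure,
};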
+ */ +int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + if (num_vfs) + return ice_pci_sriov_ena(pf, num_vfs); + + if (!pci_vfs_assigned(pdev)) { + ice_free_vfs(pf); + } else { + dev_err(&pf->pdev->dev, + "can't free VFs because some are assigned to VMs.\n"); + return -EBUSY; + } + + return 0; +} + +/** + * ice_process_vflr_event - Free VF resources via IRQ calls + * @pf: pointer to the PF structure + * + * called from the VFLR IRQ handler to + * free up VF resources and state variables + */ +void ice_process_vflr_event(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int vf_id; + u32 reg; + + if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || + !pf->num_alloc_vfs) + return; + + /* Re-enable the VFLR interrupt cause here, before looking for which + * VF got reset. Otherwise, if another VF gets a reset while the + * first one is being processed, that interrupt will be lost, and + * that VF will be stuck in reset forever. + */ + reg = rd32(hw, PFINT_OICR_ENA); + reg |= PFINT_OICR_VFLR_M; + wr32(hw, PFINT_OICR_ENA, reg); + ice_flush(hw); + + clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { + struct ice_vf *vf = &pf->vf[vf_id]; + u32 reg_idx, bit_idx; + + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + /* read GLGEN_VFLRSTAT register to find out the VFLR VFs */ + reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); + if (reg & BIT(bit_idx)) + /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ + ice_reset_vf(vf, true); + } +} + +/** + * ice_vc_dis_vf - Disable a given VF via SW reset + * @vf: pointer to the VF info + * + * Disable the VF through a SW reset + */ +static void ice_vc_dis_vf(struct ice_vf *vf) +{ + ice_vc_notify_vf_reset(vf); + ice_reset_vf(vf, false); +} + +/** + * ice_vc_send_msg_to_vf - Send message to VF + * @vf: pointer to the VF info + * @v_opcode: virtual channel opcode + * @v_retval: virtual channel return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send msg to VF + */ +static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + enum ice_status v_retval, u8 *msg, u16 msglen) +{ + enum ice_status aq_ret; + struct ice_pf *pf; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return -EINVAL; + + pf = vf->pf; + + /* single place to detect unsuccessful return values */ + if (v_retval) { + vf->num_inval_msgs++; + dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", + vf->vf_id, v_opcode, v_retval); + if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { + dev_err(&pf->pdev->dev, + "Number of invalid messages exceeded for VF %d\n", + vf->vf_id); + dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + return -EIO; + } + } else { + vf->num_valid_msgs++; + /* reset the invalid counter if a valid message is received. 
*/ + vf->num_inval_msgs = 0; + } + + aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, + msg, msglen, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "Unable to send the message to VF %d aq_err %d\n", + vf->vf_id, pf->hw.mailboxq.sq_last_status); + return -EIO; + } + + return 0; +} + +/** + * ice_vc_get_ver_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to request the API version used by the PF + */ +static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_version_info info = { + VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR + }; + + vf->vf_ver = *(struct virtchnl_version_info *)msg; + /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ + if (VF_IS_V10(&vf->vf_ver)) + info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS, + (u8 *)&info, + sizeof(struct virtchnl_version_info)); +} + +/** + * ice_vc_get_vf_res_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to request its resources + */ +static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_vf_resource *vfres = NULL; + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int len = 0; + int ret; + + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto err; + } + + len = sizeof(struct virtchnl_vf_resource); + + vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL); + if (!vfres) { + aq_ret = ICE_ERR_NO_MEMORY; + len = 0; + goto err; + } + if (VF_IS_V11(&vf->vf_ver)) + vf->driver_caps = *(u32 *)msg; + else + vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_REG | + VIRTCHNL_VF_OFFLOAD_VLAN; + + vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi->info.pvid) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; + } else { + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; + else + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; + } + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) + vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; + + vfres->num_vsis = 1; + /* Tx and Rx queue are equal for VF */ + vfres->num_queue_pairs = vsi->num_txq; + vfres->max_vectors = pf->num_vf_msix; + vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; + vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + + vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; + vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; + vfres->vsi_res[0].num_queue_pairs = vsi->num_txq; + ether_addr_copy(vfres->vsi_res[0].default_mac_addr, + vf->dflt_lan_addr.addr); + + 
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); + +err: + /* send the response back to the VF */ + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, + (u8 *)vfres, len); + + devm_kfree(&pf->pdev->dev, vfres); + return ret; +} + +/** + * ice_vc_reset_vf_msg + * @vf: pointer to the VF info + * + * called from the VF to reset itself, + * unlike other virtchnl messages, PF driver + * doesn't send the response back to the VF + */ +static void ice_vc_reset_vf_msg(struct ice_vf *vf) +{ + if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + ice_reset_vf(vf, false); +} + +/** + * ice_find_vsi_from_id + * @pf: the pf structure to search for the VSI + * @id: id of the VSI it is searching for + * + * searches for the VSI with the given id + */ +static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) + return pf->vsi[i]; + + return NULL; +} + +/** + * ice_vc_isvalid_vsi_id + * @vf: pointer to the VF info + * @vsi_id: VF relative VSI id + * + * check for the valid VSI id + */ +static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_find_vsi_from_id(pf, vsi_id); + + return (vsi && (vsi->vf_id == vf->vf_id)); +} + +/** + * ice_vc_isvalid_q_id + * @vf: pointer to the VF info + * @vsi_id: VSI id + * @qid: VSI relative queue id + * + * check for the valid queue id + */ +static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) +{ + struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + /* allocated Tx and Rx queues should be always equal for VF VSI */ + return (vsi && (qid < vsi->alloc_txq)); +} + +/** + * ice_vc_config_rss_key + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Configure the VF's RSS key + */ +static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + struct ice_vsi *vsi = NULL; + enum ice_status aq_ret; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + ret = ice_set_rss(vsi, vrk->key, NULL, 0); + aq_ret = ret ? 
ICE_ERR_PARAM : ICE_SUCCESS; +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret, + NULL, 0); +} + +/** + * ice_vc_config_rss_lut + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Configure the VF's RSS LUT + */ +static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + struct ice_vsi *vsi = NULL; + enum ice_status aq_ret; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE); + aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS; +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret, + NULL, 0); +} + +/** + * ice_vc_get_stats_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to get VSI stats + */ +static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; + enum ice_status aq_ret = 0; + struct ice_eth_stats stats; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + memset(&stats, 0, sizeof(struct ice_eth_stats)); + ice_update_eth_stats(vsi); + + stats = vsi->eth_stats; + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, + (u8 *)&stats, sizeof(stats)); +} + +/** + * ice_vc_ena_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to enable all or specific queue(s) + */ +static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; + enum ice_status aq_ret = 0; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!vqs->rx_queues && !vqs->tx_queues) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + /* Enable only Rx rings, Tx rings were enabled by the FW when the + * Tx queue group list was configured and the context bits were + * programmed using ice_vsi_cfg_txqs + */ + if (ice_vsi_start_rx_rings(vsi)) + aq_ret = ICE_ERR_PARAM; + + /* Set flag to indicate that queues are enabled */ + if (!aq_ret) + set_bit(ICE_VF_STATE_ENA, vf->vf_states); + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret, + NULL, 0); +} + +/** + * ice_vc_dis_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * 
called from the VF to disable all or specific + * queue(s) + */ +static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; + enum ice_status aq_ret = 0; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && + !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!vqs->rx_queues && !vqs->tx_queues) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop tx rings on VSI %d\n", + vsi->vsi_num); + aq_ret = ICE_ERR_PARAM; + } + + if (ice_vsi_stop_rx_rings(vsi)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop rx rings on VSI %d\n", + vsi->vsi_num); + aq_ret = ICE_ERR_PARAM; + } + + /* Clear enabled queues flag */ + if (!aq_ret) + clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret, + NULL, 0); +} + +/** + * ice_vc_cfg_irq_map_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to configure the IRQ to queue map + */ +static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_irq_map_info *irqmap_info = + (struct virtchnl_irq_map_info *)msg; + u16 vsi_id, vsi_q_id, vector_id; + struct virtchnl_vector_map *map; + struct ice_vsi *vsi = NULL; + struct ice_pf *pf = vf->pf; + enum ice_status aq_ret = 0; + unsigned long qmap; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < irqmap_info->num_vectors; i++) { + map = &irqmap_info->vecmap[i]; + + vector_id = map->vector_id; + vsi_id = map->vsi_id; + /* validate msg params */ + if (!(vector_id < pf->hw.func_caps.common_cap + .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + /* lookout for the invalid queue index */ + qmap = map->rxq_map; + for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { + struct ice_q_vector *q_vector; + + if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + q_vector = vsi->q_vectors[i]; + q_vector->num_ring_rx++; + q_vector->rx.itr_idx = map->rxitr_idx; + vsi->rx_rings[vsi_q_id]->q_vector = q_vector; + } + + qmap = map->txq_map; + for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { + struct ice_q_vector *q_vector; + + if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + q_vector = vsi->q_vectors[i]; + q_vector->num_ring_tx++; + q_vector->tx.itr_idx = map->txitr_idx; + vsi->tx_rings[vsi_q_id]->q_vector = q_vector; + } + } + + if (vsi) + ice_vsi_cfg_msix(vsi); +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret, + NULL, 0); +} + +/** + * ice_vc_cfg_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to configure the Rx/Tx queues + */ +static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) +{ 
+ struct virtchnl_vsi_queue_config_info *qci = + (struct virtchnl_vsi_queue_config_info *)msg; + struct virtchnl_queue_pair_info *qpi; + enum ice_status aq_ret = 0; + struct ice_vsi *vsi; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < qci->num_queue_pairs; i++) { + qpi = &qci->qpair[i]; + if (qpi->txq.vsi_id != qci->vsi_id || + qpi->rxq.vsi_id != qci->vsi_id || + qpi->rxq.queue_id != qpi->txq.queue_id || + !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + /* copy Tx queue info from VF into VSI */ + vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; + vsi->tx_rings[i]->count = qpi->txq.ring_len; + /* copy Rx queue info from VF into vsi */ + vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; + vsi->rx_rings[i]->count = qpi->rxq.ring_len; + if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + vsi->rx_buf_len = qpi->rxq.databuffer_size; + if (qpi->rxq.max_pkt_size >= (16 * 1024) || + qpi->rxq.max_pkt_size < 64) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + vsi->max_frame = qpi->rxq.max_pkt_size; + } + + /* VF can request to configure less than allocated queues + * or default allocated queues. So update the VSI with new number + */ + vsi->num_txq = qci->num_queue_pairs; + vsi->num_rxq = qci->num_queue_pairs; + + if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi)) + aq_ret = 0; + else + aq_ret = ICE_ERR_PARAM; + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret, + NULL, 0); +} + +/** + * ice_is_vf_trusted + * @vf: pointer to the VF info + */ +static bool ice_is_vf_trusted(struct ice_vf *vf) +{ + return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); +} + +/** + * ice_can_vf_change_mac + * @vf: pointer to the VF info + * + * Return true if the VF is allowed to change its MAC filters, false otherwise + */ +static bool ice_can_vf_change_mac(struct ice_vf *vf) +{ + /* If the VF MAC address has been set administratively (via the + * ndo_set_vf_mac command), then deny permission to the VF to + * add/delete unicast MAC addresses, unless the VF is trusted + */ + if (vf->pf_set_mac && !ice_is_vf_trusted(vf)) + return false; + + return true; +} + +/** + * ice_vc_handle_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @set: true if mac filters are being set, false otherwise + * + * add guest mac address filter + */ +static int +ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) +{ + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + struct ice_pf *pf = vf->pf; + enum virtchnl_ops vc_op; + enum ice_status ret; + LIST_HEAD(mac_list); + struct ice_vsi *vsi; + int mac_count = 0; + int i; + + if (set) + vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; + else + vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || + !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + if (set && !ice_is_vf_trusted(vf) && + (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { + dev_err(&pf->pdev->dev, + "Can't add more MAC addresses, because VF is not trusted, switch the VF to 
trusted mode in order to add more functionalities\n"); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + + for (i = 0; i < al->num_elements; i++) { + u8 *maddr = al->list[i].addr; + + if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) || + is_broadcast_ether_addr(maddr)) { + if (set) { + /* VF is trying to add filters that the PF + * already added. Just continue. + */ + dev_info(&pf->pdev->dev, + "mac %pM already set for VF %d\n", + maddr, vf->vf_id); + continue; + } else { + /* VF can't remove dflt_lan_addr/bcast mac */ + dev_err(&pf->pdev->dev, + "can't remove mac %pM for VF %d\n", + maddr, vf->vf_id); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + } + + /* check for the invalid cases and bail if necessary */ + if (is_zero_ether_addr(maddr)) { + dev_err(&pf->pdev->dev, + "invalid mac %pM provided for VF %d\n", + maddr, vf->vf_id); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + if (is_unicast_ether_addr(maddr) && + !ice_can_vf_change_mac(vf)) { + dev_err(&pf->pdev->dev, + "can't change unicast mac for untrusted VF %d\n", + vf->vf_id); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + /* get here if maddr is multicast or if VF can change mac */ + if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) { + ret = ICE_ERR_NO_MEMORY; + goto handle_mac_exit; + } + mac_count++; + } + + /* program the updated filter list */ + if (set) + ret = ice_add_mac(&pf->hw, &mac_list); + else + ret = ice_remove_mac(&pf->hw, &mac_list); + + if (ret) { + dev_err(&pf->pdev->dev, + "can't update mac filters for VF %d, error %d\n", + vf->vf_id, ret); + } else { + if (set) + vf->num_mac += mac_count; + else + vf->num_mac -= mac_count; + } + +handle_mac_exit: + ice_free_fltr_list(&pf->pdev->dev, &mac_list); + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0); +} + +/** + * ice_vc_add_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * add guest MAC address filter + */ +static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_handle_mac_addr_msg(vf, msg, true); +} + +/** + * ice_vc_del_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * remove guest MAC address filter + */ +static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_handle_mac_addr_msg(vf, msg, false); +} + +/** + * ice_vc_request_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * VFs get a default number of queues but can use this message to request a + * different number. If the request is successful, PF will reset the VF and + * return 0. If unsuccessful, PF will send message informing VF of number of + * available queue pairs via virtchnl message response to VF. + */ +static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_vf_res_request *vfres = + (struct virtchnl_vf_res_request *)msg; + int req_queues = vfres->num_queue_pairs; + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + int tx_rx_queue_left; + int cur_queues; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + cur_queues = pf->num_vf_qps; + tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + if (req_queues <= 0) { + dev_err(&pf->pdev->dev, + "VF %d tried to request %d queues. 
Ignoring.\n", + vf->vf_id, req_queues); + } else if (req_queues > ICE_MAX_QS_PER_VF) { + dev_err(&pf->pdev->dev, + "VF %d tried to request more than %d queues.\n", + vf->vf_id, ICE_MAX_QS_PER_VF); + vfres->num_queue_pairs = ICE_MAX_QS_PER_VF; + } else if (req_queues - cur_queues > tx_rx_queue_left) { + dev_warn(&pf->pdev->dev, + "VF %d requested %d more queues, but only %d left.\n", + vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); + vfres->num_queue_pairs = tx_rx_queue_left + cur_queues; + } else { + /* request is successful, then reset VF */ + vf->num_req_qs = req_queues; + ice_vc_dis_vf(vf); + dev_info(&pf->pdev->dev, + "VF %d granted request of %d queues.\n", + vf->vf_id, req_queues); + return 0; + } + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, + aq_ret, (u8 *)vfres, sizeof(*vfres)); +} + +/** + * ice_set_vf_port_vlan + * @netdev: network interface device structure + * @vf_id: VF identifier + * @vlan_id: VLAN id being set + * @qos: priority setting + * @vlan_proto: VLAN protocol + * + * program VF Port VLAN id and/or qos + */ +int +ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, + __be16 vlan_proto) +{ + u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S); + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + struct ice_vsi *vsi; + struct ice_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + if (vlan_id > ICE_MAX_VLANID || qos > 7) { + dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); + return -EINVAL; + } + + if (vlan_proto != htons(ETH_P_8021Q)) { + dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); + return -EPROTONOSUPPORT; + } + + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + if (le16_to_cpu(vsi->info.pvid) == vlanprio) { + /* duplicate request, so just return success */ + dev_info(&pf->pdev->dev, + "Duplicate pvid %d request\n", vlanprio); + return ret; + } + + /* If pvid, then remove all filters on the old VLAN */ + if (vsi->info.pvid) + ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & + VLAN_VID_MASK)); + + if (vlan_id || qos) { + ret = ice_vsi_set_pvid(vsi, vlanprio); + if (ret) + goto error_set_pvid; + } else { + ice_vsi_kill_pvid(vsi); + } + + if (vlan_id) { + dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", + vlan_id, qos, vf_id); + + /* add new VLAN filter for each MAC */ + ret = ice_vsi_add_vlan(vsi, vlan_id); + if (ret) + goto error_set_pvid; + } + + /* The Port VLAN needs to be saved across resets the same as the + * default LAN MAC address. 
+ */ + vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); + +error_set_pvid: + return ret; +} + +/** + * ice_vc_process_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @add_v: Add VLAN if true, otherwise delete VLAN + * + * Process virtchnl op to add or remove programmed guest VLAN id + */ +static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) +{ + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (add_v && !ice_is_vf_trusted(vf) && + vf->num_vlan >= ICE_MAX_VLAN_PER_VF) { + dev_info(&pf->pdev->dev, + "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n"); + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < vfl->num_elements; i++) { + if (vfl->vlan_id[i] > ICE_MAX_VLANID) { + aq_ret = ICE_ERR_PARAM; + dev_err(&pf->pdev->dev, + "invalid VF VLAN id %d\n", vfl->vlan_id[i]); + goto error_param; + } + } + + vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (vsi->info.pvid) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (ice_vsi_manage_vlan_stripping(vsi, add_v)) { + dev_err(&pf->pdev->dev, + "%sable VLAN stripping failed for VSI %i\n", + add_v ? "en" : "dis", vsi->vsi_num); + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (add_v) { + for (i = 0; i < vfl->num_elements; i++) { + u16 vid = vfl->vlan_id[i]; + + if (!ice_vsi_add_vlan(vsi, vid)) { + vf->num_vlan++; + set_bit(vid, vsi->active_vlans); + + /* Enable VLAN pruning when VLAN 0 is added */ + if (unlikely(!vid)) + if (ice_cfg_vlan_pruning(vsi, true)) + aq_ret = ICE_ERR_PARAM; + } else { + aq_ret = ICE_ERR_PARAM; + } + } + } else { + for (i = 0; i < vfl->num_elements; i++) { + u16 vid = vfl->vlan_id[i]; + + /* Make sure ice_vsi_kill_vlan is successful before + * updating VLAN information + */ + if (!ice_vsi_kill_vlan(vsi, vid)) { + vf->num_vlan--; + clear_bit(vid, vsi->active_vlans); + + /* Disable VLAN pruning when removing VLAN 0 */ + if (unlikely(!vid)) + ice_cfg_vlan_pruning(vsi, false); + } + } + } + +error_param: + /* send the response to the VF */ + if (add_v) + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret, + NULL, 0); + else + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret, + NULL, 0); +} + +/** + * ice_vc_add_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Add and program guest VLAN id + */ +static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_process_vlan_msg(vf, msg, true); +} + +/** + * ice_vc_remove_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * remove programmed guest VLAN id + */ +static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_process_vlan_msg(vf, msg, false); +} + +/** + * ice_vc_ena_vlan_stripping + * @vf: pointer to the VF info + * + * Enable VLAN header stripping for a given VF + */ +static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) +{ + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi 
= pf->vsi[vf->lan_vsi_idx]; + if (ice_vsi_manage_vlan_stripping(vsi, true)) + aq_ret = ICE_ERR_AQ_ERROR; + +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, + aq_ret, NULL, 0); +} + +/** + * ice_vc_dis_vlan_stripping + * @vf: pointer to the VF info + * + * Disable VLAN header stripping for a given VF + */ +static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) +{ + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (ice_vsi_manage_vlan_stripping(vsi, false)) + aq_ret = ICE_ERR_AQ_ERROR; + +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, + aq_ret, NULL, 0); +} + +/** + * ice_vc_process_vf_msg - Process request from VF + * @pf: pointer to the PF structure + * @event: pointer to the AQ event + * + * called from the common asq/arq handler to + * process request from VF + */ +void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + u32 v_opcode = le32_to_cpu(event->desc.cookie_high); + s16 vf_id = le16_to_cpu(event->desc.retval); + u16 msglen = event->msg_len; + u8 *msg = event->msg_buf; + struct ice_vf *vf = NULL; + int err = 0; + + if (vf_id >= pf->num_alloc_vfs) { + err = -EINVAL; + goto error_handler; + } + + vf = &pf->vf[vf_id]; + + /* Check if VF is disabled. */ + if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { + err = -EPERM; + goto error_handler; + } + + /* Perform basic checks on the msg */ + err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); + if (err) { + if (err == VIRTCHNL_ERR_PARAM) + err = -EPERM; + else + err = -EINVAL; + goto error_handler; + } + + /* Perform additional checks specific to RSS and Virtchnl */ + if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { + struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; + + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) + err = -EINVAL; + } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { + struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) + err = -EINVAL; + } + +error_handler: + if (err) { + ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0); + dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", + vf_id, v_opcode, msglen, err); + return; + } + + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + err = ice_vc_get_ver_msg(vf, msg); + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + err = ice_vc_get_vf_res_msg(vf, msg); + break; + case VIRTCHNL_OP_RESET_VF: + ice_vc_reset_vf_msg(vf); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + err = ice_vc_add_mac_addr_msg(vf, msg); + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + err = ice_vc_del_mac_addr_msg(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + err = ice_vc_cfg_qs_msg(vf, msg); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + err = ice_vc_ena_qs_msg(vf, msg); + ice_vc_notify_vf_link_state(vf); + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + err = ice_vc_dis_qs_msg(vf, msg); + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + err = ice_vc_request_qs_msg(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + err = ice_vc_cfg_irq_map_msg(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + err = ice_vc_config_rss_key(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + err = ice_vc_config_rss_lut(vf, msg); + break; + case VIRTCHNL_OP_GET_STATS: + err = ice_vc_get_stats_msg(vf, msg); + 
break; + case VIRTCHNL_OP_ADD_VLAN: + err = ice_vc_add_vlan_msg(vf, msg); + break; + case VIRTCHNL_OP_DEL_VLAN: + err = ice_vc_remove_vlan_msg(vf, msg); + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + err = ice_vc_ena_vlan_stripping(vf); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + err = ice_vc_dis_vlan_stripping(vf); + break; + case VIRTCHNL_OP_UNKNOWN: + default: + dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", + v_opcode, vf_id); + err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL, + NULL, 0); + break; + } + if (err) { + /* Helper function cares less about error return values here + * as it is busy with pending work. + */ + dev_info(&pf->pdev->dev, + "PF failed to honor VF %d, opcode %d, error %d\n", + vf_id, v_opcode, err); + } +} + +/** + * ice_get_vf_cfg + * @netdev: network interface device structure + * @vf_id: VF identifier + * @ivi: VF configuration structure + * + * return VF configuration + */ +int ice_get_vf_cfg(struct net_device *netdev, int vf_id, + struct ifla_vf_info *ivi) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_vf *vf; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + netdev_err(netdev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + ivi->vf = vf_id; + ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr); + + /* VF configuration for VLAN and applicable QoS */ + ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M; + ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >> + ICE_VLAN_PRIORITY_S; + + ivi->trusted = vf->trusted; + ivi->spoofchk = vf->spoofchk; + if (!vf->link_forced) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + ivi->max_tx_rate = vf->tx_rate; + ivi->min_tx_rate = 0; + return 0; +} + +/** + * ice_set_vf_spoofchk + * @netdev: network interface device structure + * @vf_id: VF identifier + * @ena: flag to enable or disable feature + * + * Enable or disable VF spoof checking + */ +int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi_ctx ctx = { 0 }; + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_vf *vf; + int status; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + netdev_err(netdev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + if (ena == vf->spoofchk) { + dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n", + ena ? 
"ON" : "OFF"); + return 0; + } + + ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + + if (ena) { + ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; + ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M; + } + + status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL); + if (status) { + dev_dbg(&pf->pdev->dev, + "Error %d, failed to update VSI* parameters\n", status); + return -EIO; + } + + vf->spoofchk = ena; + vsi->info.sec_flags = ctx.info.sec_flags; + vsi->info.sw_flags2 = ctx.info.sw_flags2; + + return status; +} + +/** + * ice_set_vf_mac + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: mac address + * + * program VF mac address + */ +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + netdev_err(netdev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) { + netdev_err(netdev, "%pM not a valid unicast address\n", mac); + return -EINVAL; + } + + /* copy mac into dflt_lan_addr and trigger a VF reset. The reset + * flow will use the updated dflt_lan_addr and add a MAC filter + * using ice_add_mac. Also set pf_set_mac to indicate that the PF has + * set the MAC address for this VF. + */ + ether_addr_copy(vf->dflt_lan_addr.addr, mac); + vf->pf_set_mac = true; + netdev_info(netdev, + "mac on VF %d set to %pM\n. VF driver will be reinitialized\n", + vf_id, mac); + + ice_vc_dis_vf(vf); + return ret; +} + +/** + * ice_set_vf_trust + * @netdev: network interface device structure + * @vf_id: VF identifier + * @trusted: Boolean value to enable/disable trusted VF + * + * Enable or disable a given VF as trusted + */ +int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_vf *vf; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + /* Check if already trusted */ + if (trusted == vf->trusted) + return 0; + + vf->trusted = trusted; + ice_vc_dis_vf(vf); + dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", + vf_id, trusted ? 
"" : "un"); + + return 0; +} + +/** + * ice_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: VF identifier + * @link_state: required link state + * + * Set VF's link state, irrespective of physical link state status + */ +int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + struct virtchnl_pf_event pfe = { 0 }; + struct ice_link_status *ls; + struct ice_vf *vf; + struct ice_hw *hw; + + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + hw = &pf->hw; + ls = &pf->hw.port_info->phy.link_info; + + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + vf->link_up = ls->link_info & ICE_AQ_LINK_UP; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + break; + default: + return -EINVAL; + } + + if (vf->link_forced) + ice_set_pfe_link_forced(vf, &pfe, vf->link_up); + else + ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up); + + /* Notify the VF of its new link state */ + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + sizeof(pfe), NULL); + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h new file mode 100644 index 000000000000..10131e0180f9 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_VIRTCHNL_PF_H_ +#define _ICE_VIRTCHNL_PF_H_ +#include "ice.h" + +#define ICE_MAX_VLANID 4095 +#define ICE_VLAN_PRIORITY_S 12 +#define ICE_VLAN_M 0xFFF +#define ICE_PRIORITY_M 0x7000 + +/* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */ +#define ICE_MAX_VLAN_PER_VF 8 +#define ICE_MAX_MACADDR_PER_VF 12 + +/* Malicious Driver Detection */ +#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3 +#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 + +/* Static VF transaction/status register def */ +#define VF_DEVICE_STATUS 0xAA +#define VF_TRANS_PENDING_M 0x20 + +/* Specific VF states */ +enum ice_vf_states { + ICE_VF_STATE_INIT = 0, + ICE_VF_STATE_ACTIVE, + ICE_VF_STATE_ENA, + ICE_VF_STATE_DIS, + ICE_VF_STATE_MC_PROMISC, + ICE_VF_STATE_UC_PROMISC, + /* state to indicate if PF needs to do vector assignment for VF. + * This needs to be set during first time VF initialization or later + * when VF asks for more Vectors through virtchnl OP. 
+ */ + ICE_VF_STATE_CFG_INTR, + ICE_VF_STATES_NBITS +}; + +/* VF capabilities */ +enum ice_virtchnl_cap { + ICE_VIRTCHNL_VF_CAP_L2 = 0, + ICE_VIRTCHNL_VF_CAP_PRIVILEGE, +}; + +/* VF information structure */ +struct ice_vf { + struct ice_pf *pf; + + s16 vf_id; /* VF id in the PF space */ + u32 driver_caps; /* reported by VF driver */ + int first_vector_idx; /* first vector index of this VF */ + struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */ + struct virtchnl_version_info vf_ver; + struct virtchnl_ether_addr dflt_lan_addr; + u16 port_vlan_id; + u8 pf_set_mac; /* VF MAC address set by VMM admin */ + u8 trusted; + u16 lan_vsi_idx; /* index into PF struct */ + u16 lan_vsi_num; /* ID as used by firmware */ + u64 num_mdd_events; /* number of mdd events detected */ + u64 num_inval_msgs; /* number of continuous invalid msgs */ + u64 num_valid_msgs; /* number of valid msgs detected */ + unsigned long vf_caps; /* vf's adv. capabilities */ + DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + u8 link_forced; + u8 link_up; /* only valid if VF link is forced */ + u8 spoofchk; + u16 num_mac; + u16 num_vlan; + u8 num_req_qs; /* num of queue pairs requested by VF */ +}; + +#ifdef CONFIG_PCI_IOV +void ice_process_vflr_event(struct ice_pf *pf); +int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); +int ice_get_vf_cfg(struct net_device *netdev, int vf_id, + struct ifla_vf_info *ivi); + +void ice_free_vfs(struct ice_pf *pf); +void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); +void ice_vc_notify_link_state(struct ice_pf *pf); +void ice_vc_notify_reset(struct ice_pf *pf); +bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); + +int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, + u16 vlan_id, u8 qos, __be16 vlan_proto); + +int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate); + +int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); + +int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); + +int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); +#else /* CONFIG_PCI_IOV */ +#define ice_process_vflr_event(pf) do {} while (0) +#define ice_free_vfs(pf) do {} while (0) +#define ice_vc_process_vf_msg(pf, event) do {} while (0) +#define ice_vc_notify_link_state(pf) do {} while (0) +#define ice_vc_notify_reset(pf) do {} while (0) + +static inline bool +ice_reset_all_vfs(struct ice_pf __always_unused *pf, + bool __always_unused is_vflr) +{ + return true; +} + +static inline int +ice_sriov_configure(struct pci_dev __always_unused *pdev, + int __always_unused num_vfs) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_mac(struct net_device __always_unused *netdev, + int __always_unused vf_id, u8 __always_unused *mac) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_get_vf_cfg(struct net_device __always_unused *netdev, + int __always_unused vf_id, + struct ifla_vf_info __always_unused *ivi) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_trust(struct net_device __always_unused *netdev, + int __always_unused vf_id, bool __always_unused trusted) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_port_vlan(struct net_device __always_unused *netdev, + int __always_unused vf_id, u16 __always_unused vid, + u8 __always_unused qos, __be16 __always_unused v_proto) +{ + return 
-EOPNOTSUPP; +} + +static inline int +ice_set_vf_spoofchk(struct net_device __always_unused *netdev, + int __always_unused vf_id, bool __always_unused ena) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_link_state(struct net_device __always_unused *netdev, + int __always_unused vf_id, int __always_unused link_state) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_bw(struct net_device __always_unused *netdev, + int __always_unused vf_id, int __always_unused min_tx_rate, + int __always_unused max_tx_rate) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* _ICE_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 0796cef96fa3..5df88ad8ac81 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -239,7 +239,7 @@ static struct pci_driver igb_driver = { MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@ -9086,7 +9086,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; pci_ers_result_t result; - int err; if (pci_enable_device_mem(pdev)) { dev_err(&pdev->dev, @@ -9110,14 +9109,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_RECOVERED; } - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - dev_err(&pdev->dev, - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", - err); - /* non-fatal, continue */ - } - return result; } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index e0c989ffb2b3..820d49eb41ab 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -3011,7 +3011,7 @@ module_exit(igbvf_exit_module); MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); /* netdev.c */ diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile new file mode 100644 index 000000000000..4387f6ba8e67 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2018 Intel Corporation + +# +# Intel(R) I225-LM/I225-V 2.5G Ethernet Controller +# + +obj-$(CONFIG_IGC) += igc.o + +igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h new file mode 100644 index 000000000000..cdf18a5d9e08 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_H_ +#define _IGC_H_ + +#include <linux/kobject.h> + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> + +#include <linux/ethtool.h> + +#include <linux/sctp.h> + +#define IGC_ERR(args...) 
pr_err("igc: " args) + +#define PFX "igc: " + +#include <linux/timecounter.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> + +#include "igc_hw.h" + +/* main */ +extern char igc_driver_name[]; +extern char igc_driver_version[]; + +/* Interrupt defines */ +#define IGC_START_ITR 648 /* ~6000 ints/sec */ +#define IGC_FLAG_HAS_MSI BIT(0) +#define IGC_FLAG_QUEUE_PAIRS BIT(4) +#define IGC_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGC_FLAG_MEDIA_RESET BIT(10) +#define IGC_FLAG_MAS_ENABLE BIT(12) +#define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_VLAN_PROMISC BIT(15) + +#define IGC_START_ITR 648 /* ~6000 ints/sec */ +#define IGC_4K_ITR 980 +#define IGC_20K_ITR 196 +#define IGC_70K_ITR 56 + +#define IGC_DEFAULT_ITR 3 /* dynamic */ +#define IGC_MAX_ITR_USECS 10000 +#define IGC_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_MSIX_ENTRIES 10 + +/* TX/RX descriptor defines */ +#define IGC_DEFAULT_TXD 256 +#define IGC_DEFAULT_TX_WORK 128 +#define IGC_MIN_TXD 80 +#define IGC_MAX_TXD 4096 + +#define IGC_DEFAULT_RXD 256 +#define IGC_MIN_RXD 80 +#define IGC_MAX_RXD 4096 + +/* Transmit and receive queues */ +#define IGC_MAX_RX_QUEUES 4 +#define IGC_MAX_TX_QUEUES 4 + +#define MAX_Q_VECTORS 8 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +/* Supported Rx Buffer Sizes */ +#define IGC_RXBUFFER_256 256 +#define IGC_RXBUFFER_2048 2048 +#define IGC_RXBUFFER_3072 3072 + +#define IGC_RX_HDR_LEN IGC_RXBUFFER_256 + +/* RX and TX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGC_RX_PTHRESH 8 +#define IGC_RX_HTHRESH 8 +#define IGC_TX_PTHRESH 8 +#define IGC_TX_HTHRESH 1 +#define IGC_RX_WTHRESH 4 +#define IGC_TX_WTHRESH 16 + +#define IGC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define IGC_TS_HDR_LEN 16 + +#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) + +#if (PAGE_SIZE < 8192) +#define IGC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN) +#else +#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +/* igc_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, + __IGC_PTP_TX_IN_PROGRESS, +}; + +enum igc_tx_flags { + /* cmd_type flags */ + IGC_TX_FLAGS_VLAN = 0x01, + IGC_TX_FLAGS_TSO = 0x02, + IGC_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGC_TX_FLAGS_IPV4 = 0x10, + IGC_TX_FLAGS_CSUM = 0x20, +}; + +enum igc_boards { + board_base, +}; + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGC_MAX_TXD_PWR 15 +#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igc_rx_buffer { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +}; + +struct igc_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igc_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igc_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processed */ + u64 other_packets; +}; + +struct igc_ring_container { + struct igc_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igc_ring { + struct igc_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igc_tx_buffer *tx_buffer_info; + struct igc_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igc_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct igc_rx_queue_stats rx_stats; + struct igc_rx_packet_stats pkt_stats; + struct u64_stats_sync rx_syncp; + struct sk_buff *skb; + }; + }; +} ____cacheline_internodealigned_in_smp; + +struct igc_q_vector { + struct igc_adapter *adapter; /* backlink */ + void __iomem *itr_register; + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + + struct igc_ring_container rx, tx; + + struct napi_struct napi; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + struct net_device poll_dev; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igc_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +struct igc_mac_addr { + u8 addr[ETH_ALEN]; + u8 queue; + u8 state; /* bitmask */ +}; + +#define IGC_MAC_STATE_DEFAULT 0x1 +#define IGC_MAC_STATE_MODIFIED 0x2 +#define IGC_MAC_STATE_IN_USE 0x4 + +/* Board specific private data structure */ +struct igc_adapter { + struct net_device *netdev; + + unsigned long state; + unsigned int flags; + unsigned int num_q_vectors; + + struct msix_entry *msix_entries; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + + u16 link_speed; + u16 link_duplex; + + u8 port_num; + + u8 __iomem *io_addr; + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + + u8 tx_timeout_factor; + + int msg_enable; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct pci_dev *pdev; + /* lock for statistics */ + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in igc_hw.h */ + struct igc_hw hw; + struct igc_hw_stats stats; + + struct igc_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + u16 tx_ring_count; + u16 rx_ring_count; + + u32 *shadow_vfta; + + u32 rss_queues; + + /* lock for RX network flow classification filter */ + spinlock_t nfc_lock; + + struct igc_mac_addr *mac_table; + + unsigned long link_check_timeout; + struct igc_info ei; +}; + +/* igc_desc_unused - calculate if we have unused descriptors */ +static inline u16 igc_desc_unused(const struct igc_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline s32 igc_reset_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +enum igc_ring_flags_t { + IGC_RING_FLAG_RX_3K_BUFFER, + IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGC_RING_FLAG_RX_SCTP_CSUM, + IGC_RING_FLAG_RX_LB_VLAN_BSWAP, + IGC_RING_FLAG_TX_CTX_IDX, + IGC_RING_FLAG_TX_DETECT_HANG +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGC_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; +#endif + return IGC_RXBUFFER_2048; +} + +static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return 0; +} + +#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) + +#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) + +#define IGC_RX_DESC(R, i) \ + (&(((union igc_adv_rx_desc *)((R)->desc))[i])) +#define IGC_TX_DESC(R, i) \ + (&(((union igc_adv_tx_desc *)((R)->desc))[i])) +#define IGC_TX_CTXTDESC(R, i) \ + (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) + +#endif /* _IGC_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c new file mode 100644 index 000000000000..832da609d9a7 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -0,0 +1,541 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include <linux/delay.h> + +#include "igc_hw.h" +#include "igc_i225.h" +#include "igc_mac.h" +#include "igc_base.h" +#include "igc.h" + +/** + * igc_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + */ +static s32 igc_set_pcie_completion_timeout(struct igc_hw *hw) +{ + u32 gcr = rd32(IGC_GCR); + u16 pcie_devctl2; + s32 ret_val = 0; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & IGC_GCR_CMPL_TMOUT_MASK) + goto out; + + /* if capabilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & IGC_GCR_CAP_VER2)) { + gcr |= IGC_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = igc_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = igc_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~IGC_GCR_CMPL_TMOUT_RESEND; + + wr32(IGC_GCR, gcr); + + return ret_val; +} + +/** + * igc_check_for_link_base - Check for link + * @hw: pointer to the HW structure + * + 
* If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + */ +static s32 igc_check_for_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + + ret_val = igc_check_for_copper_link(hw); + + return ret_val; +} + +/** + * igc_reset_hw_base - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_reset_hw_base(struct igc_hw *hw) +{ + s32 ret_val; + u32 ctrl; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igc_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = igc_set_pcie_completion_timeout(hw); + if (ret_val) + hw_dbg("PCI-E Set completion timeout has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(IGC_IMC, 0xffffffff); + + wr32(IGC_RCTL, 0); + wr32(IGC_TCTL, IGC_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(IGC_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + + ret_val = igc_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + wr32(IGC_IMC, 0xffffffff); + rd32(IGC_ICR); + + return ret_val; +} + +/** + * igc_get_phy_id_base - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + */ +static s32 igc_get_phy_id_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + + ret_val = igc_get_phy_id(hw); + + return ret_val; +} + +/** + * igc_init_nvm_params_base - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_nvm_params_base(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(IGC_EECD); + u16 size; + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? + 16 : 8; + + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + return 0; +} + +/** + * igc_setup_copper_link_base - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +static s32 igc_setup_copper_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + wr32(IGC_CTRL, ctrl); + + ret_val = igc_setup_copper_link(hw); + + return ret_val; +} + +/** + * igc_init_mac_params_base - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + */ +static s32 igc_init_mac_params_base(struct igc_hw *hw) +{ + struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base; + struct igc_mac_info *mac = &hw->mac; + + /* Set mta register count */ + mac->mta_reg_count = 128; + mac->rar_entry_count = IGC_RAR_ENTRIES; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_base; + + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + if (mac->type == igc_i225) + dev_spec->clear_semaphore_once = true; + + /* physical interface link setup */ + mac->ops.setup_physical_interface = igc_setup_copper_link_base; + + return 0; +} + +/** + * igc_init_phy_params_base - Init PHY func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_phy_params_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u32 ctrl_ext; + + if (hw->phy.media_type != igc_media_type_copper) { + phy->type = igc_phy_none; + goto out; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + phy->reset_delay_us = 100; + + ctrl_ext = rd32(IGC_CTRL_EXT); + + /* set lan id */ + hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> + IGC_STATUS_FUNC_SHIFT; + + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + + ret_val = igc_get_phy_id_base(hw); + if (ret_val) + return ret_val; + + igc_check_for_link_base(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case I225_I_PHY_ID: + phy->type = igc_phy_i225; + break; + default: + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +static s32 igc_get_invariants_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u32 link_mode = 0; + u32 ctrl_ext = 0; + s32 ret_val = 0; + + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + mac->type = igc_i225; + break; + default: + return -IGC_ERR_MAC_INIT; + } + + hw->phy.media_type = igc_media_type_copper; + + ctrl_ext = rd32(IGC_CTRL_EXT); + link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK; + + /* mac initialization and operations */ + ret_val = igc_init_mac_params_base(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igc_init_nvm_params_base(hw); + switch (hw->mac.type) { + case igc_i225: + ret_val = igc_init_nvm_params_i225(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igc_init_phy_params_base(hw); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. 
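Taken together, igc_acquire_phy_base() and igc_release_phy_base() exist to bracket PHY register traffic with the shared SW/FW semaphore (IGC_SWFW_PHY0_SM). A minimal sketch of the pattern they provide, with the actual register access left as a placeholder since the low-level read/write helpers take the semaphore themselves:

	/* Illustrative only: the bracketing these wrappers give a caller. */
	ret_val = hw->phy.ops.acquire(hw);	/* -> igc_acquire_swfw_sync_i225(hw, IGC_SWFW_PHY0_SM) */
	if (ret_val)
		return ret_val;

	/* ... raw MDIC-level PHY access goes here ... */

	hw->phy.ops.release(hw);		/* -> igc_release_swfw_sync_i225(hw, IGC_SWFW_PHY0_SM) */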
+ */ +static void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_get_link_up_info_base - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + */ +static s32 igc_get_link_up_info_base(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + ret_val = igc_get_speed_and_duplex_copper(hw, speed, duplex); + + return ret_val; +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + */ +static s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u16 i, rar_count = mac->rar_entry_count; + s32 ret_val = 0; + + /* Setup the receive address */ + igc_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igc_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igc_clear_hw_cntrs_base(hw); + + return ret_val; +} + +/** + * igc_read_mac_addr_base - Read device MAC address + * @hw: pointer to the HW structure + */ +static s32 igc_read_mac_addr_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + + ret_val = igc_read_mac_addr(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + */ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. 
+ */ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + wr32(IGC_RFCTL, rfctl); + + if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(IGC_RXDCTL(i)); + wr32(IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + pr_debug("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = rd32(IGC_RLPML); + wr32(IGC_RLPML, 0); + + rctl = rd32(IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + wr32(IGC_RCTL, temp_rctl); + wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(IGC_RXDCTL(i), rxdctl[i]); + wr32(IGC_RCTL, rctl); + wrfl(); + + wr32(IGC_RLPML, rlpml); + wr32(IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(IGC_ROC); + rd32(IGC_RNBC); + rd32(IGC_MPC); +} + +static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_link_base, + .rar_set = igc_rar_set, + .read_mac_addr = igc_read_mac_addr_base, + .get_speed_and_duplex = igc_get_link_up_info_base, +}; + +static const struct igc_phy_operations igc_phy_ops_base = { + .acquire = igc_acquire_phy_base, + .release = igc_release_phy_base, + .reset = igc_phy_hw_reset, + .read_reg = igc_read_phy_reg_gpy, + .write_reg = igc_write_phy_reg_gpy, +}; + +const struct igc_info igc_base_info = { + .get_invariants = igc_get_invariants_base, + .mac_ops = &igc_mac_ops_base, + .phy_ops = &igc_phy_ops_base, +}; diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h new file mode 100644 index 000000000000..35588fa7b8c5 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_BASE_H +#define _IGC_BASE_H + +/* forward declaration */ +void igc_rx_fifo_flush_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ 
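These DTYP/DCMD masks are what a transmit path ORs into the cmd_type_len word of union igc_adv_tx_desc above, with the buffer length in the low bits (compare the datalen:16 field of igc_adv_data_desc further down) and the payload length going into olinfo_status. A rough sketch for an illustrative 1500-byte, single-buffer packet; the values are examples, not from the patch:

	/* IGC_TXD_DCMD is (EOP | RS) from igc.h. */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |	/* advanced data descriptor */
		       IGC_ADVTXD_DCMD_DEXT |	/* advanced (extended) format */
		       IGC_ADVTXD_DCMD_IFCS |	/* hardware appends the CRC */
		       IGC_TXD_DCMD |		/* end of packet + report status */
		       1500;			/* data buffer length */
	u32 olinfo_status = 1500 << IGC_ADVTXD_PAYLEN_SHIFT;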
+#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define IGC_RAR_ENTRIES 16 + +struct igc_adv_data_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u32 datalen:16; /* Data buffer length */ + u32 rsvd:4; + u32 dtyp:4; /* Descriptor type */ + u32 dcmd:8; /* Descriptor command */ + } config; + } lower; + union { + u32 data; + struct { + u32 status:4; /* Descriptor status */ + u32 idx:4; + u32 popts:6; /* Packet Options */ + u32 paylen:18; /* Payload length */ + } options; + } upper; +}; + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 + +#endif /* _IGC_BASE_H */ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h new file mode 100644 index 000000000000..8740754ea1fd --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -0,0 +1,389 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ + +/* PCI Bus Info */ +#define PCIE_DEVICE_CONTROL2 0x28 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +/* Physical Func Reset Done Indication */ +#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/*Blocks new Master requests */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 +/* Status of Master requests. */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* PCI Express Control */ +#define IGC_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IGC_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IGC_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IGC_GCR_CAP_VER2 0x00040000 + +/* Receive Address + * Number of high/low register pairs in the RAR. 
The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define IGC_RAH_POOL_1 0x00040000 +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 + +/* Device Control */ +#define IGC_CTRL_RST 0x04000000 /* Global reset */ + +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ + +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ + +#define IGC_CONNSW_AUTOSENSE_EN 0x1 + +/* PBA constants */ +#define IGC_PBA_34K 0x0022 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x1 +#define IGC_SWFW_PHY0_SM 0x2 + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */ + +/* NVM Control */ +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + +/* Offset to data in NVM read/write registers */ +#define IGC_NVM_RW_REG_DATA 16 +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +/* NVM Word Offsets */ +#define NVM_CHECKSUM_REG 0x003F + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* Collision related configuration parameters */ +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. */ +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ +#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ +#define IGC_ICR_LSC BIT(2) /* Link Status Change */ +#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ +#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. 
threshold (0) */ +#define IGC_ICR_RXO BIT(6) /* Rx overrun */ +#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ +#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ + +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED BIT(31) + +#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ + +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ + +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */ +#define IGC_ICS_DRSTA IGC_ICR_DRSTA /* Device Reset Aserted */ + +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define IGC_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define IGC_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */ + +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_THRESHOLD 15 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 +/* Enable XON frame transmission */ +#define IGC_FCRTL_XONE 0x80000000 + +/* 
Management Control */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 /* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ + +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ + +#define IGC_RXDEXT_STATERR_CE 0x01000000 +#define IGC_RXDEXT_STATERR_SE 0x02000000 +#define IGC_RXDEXT_STATERR_SEQ 0x04000000 +#define IGC_RXDEXT_STATERR_CXE 0x10000000 +#define IGC_RXDEXT_STATERR_TCPE 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define IGC_RXDEXT_STATERR_RXE 0x80000000 + +/* Same mask, but for extended and packet split descriptors */ +#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \ + IGC_RXDEXT_STATERR_CE | \ + IGC_RXDEXT_STATERR_SE | \ + IGC_RXDEXT_STATERR_SEQ | \ + IGC_RXDEXT_STATERR_CXE | \ + IGC_RXDEXT_STATERR_RXE) + +/* Header split receive */ +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_LEF 0x00040000 + +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ + +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* MAC definitions */ +#define IGC_FACTPS_MNGCG 0x20000000 +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ + +/* PHY */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define IGC_GEN_POLL_TIMEOUT 1920 + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 
0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* Bit definitions for valid PHY IDs. I = Integrated E = External */ +#define I225_I_PHY_ID 0x67C9DC00 + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_INT_EN 0x20000000 +#define IGC_MDIC_ERROR 0x40000000 +#define IGC_MDIC_DEST 0x80000000 + +#define IGC_N0_QUEUE -1 + +#endif /* _IGC_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h new file mode 100644 index 000000000000..c50414f48f0d --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_HW_H_ +#define _IGC_HW_H_ + +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/netdevice.h> + +#include "igc_regs.h" +#include "igc_defines.h" +#include "igc_mac.h" +#include "igc_phy.h" +#include "igc_nvm.h" +#include "igc_i225.h" +#include "igc_base.h" + +#define IGC_DEV_ID_I225_LM 0x15F2 +#define IGC_DEV_ID_I225_V 0x15F3 + +#define IGC_FUNC_0 0 + +/* Function pointers for the MAC. */ +struct igc_mac_operations { + s32 (*check_for_link)(struct igc_hw *hw); + s32 (*reset_hw)(struct igc_hw *hw); + s32 (*init_hw)(struct igc_hw *hw); + s32 (*setup_physical_interface)(struct igc_hw *hw); + void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index); + s32 (*read_mac_addr)(struct igc_hw *hw); + s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed, + u16 *duplex); + s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask); + void (*release_swfw_sync)(struct igc_hw *hw, u16 mask); +}; + +enum igc_mac_type { + igc_undefined = 0, + igc_i225, + igc_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum igc_phy_type { + igc_phy_unknown = 0, + igc_phy_none, + igc_phy_i225, +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_flash_hw, + igc_nvm_invm, +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_base_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum igc_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 mta_reg_count; + u16 uta_reg_count; + + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool asf_firmware_present; + bool arc_subsystem_valid; + + bool autoneg; + bool autoneg_failed; + bool get_link_status; +}; + +struct igc_nvm_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + s32 (*update)(struct igc_hw *hw); + s32 (*validate)(struct igc_hw *hw); + s32 (*valid_led_default)(struct igc_hw *hw, u16 *data); +}; + +struct igc_phy_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*check_polarity)(struct igc_hw *hw); + s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_cfg_done)(struct igc_hw *hw); + s32 (*get_cable_length)(struct igc_hw *hw); + s32 (*get_phy_info)(struct igc_hw *hw); + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + + enum igc_phy_type type; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_bus_info { + u16 func; + u16 pci_cmd_word; +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* Type of flow control */ + enum igc_fc_mode requested_mode; +}; + +struct igc_dev_spec_base { + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + bool module_plugged; + u8 media_port; + bool mas_capable; +}; + +struct igc_hw { + void *back; + + u8 __iomem *hw_addr; + unsigned long io_base; + + struct igc_mac_info mac; + struct 
igc_fc_info fc; + struct igc_nvm_info nvm; + struct igc_phy_info phy; + + struct igc_bus_info bus; + + union { + struct igc_dev_spec_base _base; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct net_device *igc_get_hw_dev(struct igc_hw *hw); +#define hw_dbg(format, arg...) \ + netdev_dbg(igc_get_hw_dev(hw), format, ##arg) + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); + +#endif /* _IGC_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c new file mode 100644 index 000000000000..c25f555aaf82 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include <linux/delay.h> + +#include "igc_hw.h" + +/** + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +static s32 igc_acquire_nvm_i225(struct igc_hw *hw) +{ + return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_release_nvm_i225 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. 
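For callers, this acquire/release pair stays hidden behind the NVM ops table. A minimal sketch of reading one shadow-RAM word (illustrative fragment, error handling omitted):

	u16 word;
	s32 ret_val;

	/* On flash-backed parts ops.read is igc_read_nvm_srrd_i225, which
	 * takes and drops IGC_SWFW_EEP_SM internally around the EERD access.
	 */
	ret_val = hw->nvm.ops.read(hw, NVM_CHECKSUM_REG, 1, &word);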
+ */ +static void igc_release_nvm_i225(struct igc_hw *hw) +{ + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) +{ + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + u32 swsm; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._base.clear_semaphore_once) { + hw->dev_spec._base.clear_semaphore_once = false; + igc_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usleep_range(500, 600); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return 0; +} + +/** + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + s32 i = 0, timeout = 200; + u32 fwmask = mask << 16; + u32 swmask = mask; + s32 ret_val = 0; + u32 swfw_sync; + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igc_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igc_get_hw_semaphore_i225(hw)) + ; /* Empty */ + + swfw_sync = rd32(IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +} + +/** + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. 
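The read loop below deliberately drops and re-takes the semaphore per burst so firmware can claim the NVM between chunks. With IGC_EERD_EEWR_MAX_COUNT = 512, a hypothetical 700-word read splits as:

	i = 0:    count = 512   (700 - 0   >= 512, capped at the burst limit)
	i = 512:  count = 188   (700 - 512 <  512, remainder)

each burst wrapped in its own nvm.ops.acquire()/release() pair.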
+ */ +static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_read_nvm_eerd(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + */ +static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 attempts = 100000; + u32 i, k, eewr = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + wr32(IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + rd32(IGC_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + */ +static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? 
+ IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_write_nvm_srwr(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, + u16 *data); + s32 status = 0; + + status = hw->nvm.ops.acquire(hw); + if (status) + goto out; + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + +out: + return status; +} + +/** + * igc_pool_flash_update_done_i225 - Pool FLUDONE status + * @hw: pointer to the HW structure + */ +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 i, reg; + + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { + reg = rd32(IGC_EECD); + if (reg & IGC_EECD_FLUDONE_I225) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igc_update_flash_i225 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + */ +static s32 igc_update_flash_i225(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; + wr32(IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + u16 checksum = 0; + s32 ret_val = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. 
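The loop that follows implements the checksum rule from igc_defines.h: words 0x00 through 0x3E are summed, and the value written at NVM_CHECKSUM_REG (0x3F) is chosen so that the 16-bit sum of all 64 words equals NVM_SUM (0xBABA). With an illustrative partial sum:

	word[0x3F] = NVM_SUM - (word[0x00] + ... + word[0x3E])      (16-bit arithmetic)
	e.g. partial sum 0x1234  ->  word[0x3F] = 0xBABA - 0x1234 = 0xA886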
+ */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + +out: + return ret_val; +} + +/** + * igc_get_flash_presence_i225 - Check if flash device is detected + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + bool ret_val = false; + u32 eec = 0; + + eec = rd32(IGC_EECD); + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + + /* NVM Function Pointers */ + if (igc_get_flash_presence_i225(hw)) { + hw->nvm.type = igc_nvm_flash_hw; + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + hw->nvm.type = igc_nvm_invm; + nvm->ops.read = igc_read_nvm_eerd; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} diff --git a/drivers/net/ethernet/intel/igc/igc_i225.h b/drivers/net/ethernet/intel/igc/igc_i225.h new file mode 100644 index 000000000000..7b66e1f9c0e6 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_i225.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_I225_H_ +#define _IGC_I225_H_ + +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); + +s32 igc_init_nvm_params_i225(struct igc_hw *hw); +bool igc_get_flash_presence_i225(struct igc_hw *hw); + +#endif diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c new file mode 100644 index 000000000000..f7683d3ae47c --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -0,0 +1,806 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include <linux/pci.h> +#include <linux/delay.h> + +#include "igc_mac.h" +#include "igc_hw.h" + +/* forward declaration */ +static s32 igc_set_default_fc(struct igc_hw *hw); +static s32 igc_set_fc_watermarks(struct igc_hw *hw); + +/** + * igc_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. 
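One timing note on the polling loop below: MASTER_DISABLE_TIMEOUT is 800 iterations, but each pass sleeps via usleep_range(2000, 3000), so the worst case works out to

	800 * (2 to 3) ms  ~=  1.6 to 2.4 s

rather than the 80 ms implied by the "100 microseconds" comment next to the define in igc_defines.h.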
+ */ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + wr32(IGC_CTRL, ctrl); + + while (timeout) { + if (!(rd32(IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(2000, 3000); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) +{ + u8 mac_addr[ETH_ALEN] = {0}; + u32 i; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + */ +s32 igc_setup_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igc_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == igc_fc_default) { + ret_val = igc_set_default_fc(hw); + if (ret_val) + goto out; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(IGC_FCT, FLOW_CONTROL_TYPE); + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(IGC_FCTTV, hw->fc.pause_time); + + ret_val = igc_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igc_set_default_fc - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. 
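Despite the wording above, the body that follows does not actually read the EEPROM; it unconditionally selects full flow control. The effective default, traced through the code below:

	/* After igc_set_default_fc() and igc_setup_link():
	 *   hw->fc.requested_mode == igc_fc_full
	 *   hw->fc.current_mode   == igc_fc_full   (copied from requested_mode)
	 * so igc_force_mac_fc() ends up setting both IGC_CTRL_TFCE and IGC_CTRL_RFCE.
	 */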
+ */ +static s32 igc_set_default_fc(struct igc_hw *hw) +{ + hw->fc.requested_mode = igc_fc_full; + return 0; +} + +/** + * igc_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + */ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + wr32(IGC_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** + * igc_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
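The switch in igc_force_mac_fc() earlier in this hunk is just a mapping from the resolved flow-control mode onto two MAC enables: RFCE (honour received PAUSE frames) and TFCE (transmit PAUSE frames). The same mapping as a small stand-alone helper (the bit positions are placeholders, not the real CTRL layout):

#include <stdint.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

#define CTRL_RFCE (1u << 27)   /* placeholder: receive flow control enable  */
#define CTRL_TFCE (1u << 28)   /* placeholder: transmit flow control enable */

/* Clear both enables, then set the ones implied by the mode. */
static uint32_t apply_fc_mode(uint32_t ctrl, enum fc_mode mode)
{
        ctrl &= ~(CTRL_RFCE | CTRL_TFCE);
        if (mode == FC_RX_PAUSE || mode == FC_FULL)
                ctrl |= CTRL_RFCE;
        if (mode == FC_TX_PAUSE || mode == FC_FULL)
                ctrl |= CTRL_TFCE;
        return ctrl;
}

int main(void)
{
        for (enum fc_mode m = FC_NONE; m <= FC_FULL; m++)
                printf("mode %d -> ctrl 0x%08x\n", (int)m, apply_fc_mode(0, m));
        return 0;
}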
+ */ +void igc_clear_hw_cntrs_base(struct igc_hw *hw) +{ + rd32(IGC_CRCERRS); + rd32(IGC_SYMERRS); + rd32(IGC_MPC); + rd32(IGC_SCC); + rd32(IGC_ECOL); + rd32(IGC_MCC); + rd32(IGC_LATECOL); + rd32(IGC_COLC); + rd32(IGC_DC); + rd32(IGC_SEC); + rd32(IGC_RLEC); + rd32(IGC_XONRXC); + rd32(IGC_XONTXC); + rd32(IGC_XOFFRXC); + rd32(IGC_XOFFTXC); + rd32(IGC_FCRUC); + rd32(IGC_GPRC); + rd32(IGC_BPRC); + rd32(IGC_MPRC); + rd32(IGC_GPTC); + rd32(IGC_GORCL); + rd32(IGC_GORCH); + rd32(IGC_GOTCL); + rd32(IGC_GOTCH); + rd32(IGC_RNBC); + rd32(IGC_RUC); + rd32(IGC_RFC); + rd32(IGC_ROC); + rd32(IGC_RJC); + rd32(IGC_TORL); + rd32(IGC_TORH); + rd32(IGC_TOTL); + rd32(IGC_TOTH); + rd32(IGC_TPR); + rd32(IGC_TPT); + rd32(IGC_MPTC); + rd32(IGC_BPTC); + + rd32(IGC_PRC64); + rd32(IGC_PRC127); + rd32(IGC_PRC255); + rd32(IGC_PRC511); + rd32(IGC_PRC1023); + rd32(IGC_PRC1522); + rd32(IGC_PTC64); + rd32(IGC_PTC127); + rd32(IGC_PTC255); + rd32(IGC_PTC511); + rd32(IGC_PTC1023); + rd32(IGC_PTC1522); + + rd32(IGC_ALGNERRC); + rd32(IGC_RXERRC); + rd32(IGC_TNCRS); + rd32(IGC_CEXTERR); + rd32(IGC_TSCTC); + rd32(IGC_TSCTFC); + + rd32(IGC_MGTPRC); + rd32(IGC_MGTPDC); + rd32(IGC_MGTPTC); + + rd32(IGC_IAC); + rd32(IGC_ICRXOC); + + rd32(IGC_ICRXPTC); + rd32(IGC_ICRXATC); + rd32(IGC_ICTXPTC); + rd32(IGC_ICTXATC); + rd32(IGC_ICTXQEC); + rd32(IGC_ICTXQMTC); + rd32(IGC_ICRXDMTC); + + rd32(IGC_CBTMPC); + rd32(IGC_HTDPMC); + rd32(IGC_CBRMPC); + rd32(IGC_RPTHC); + rd32(IGC_HGPTC); + rd32(IGC_HTCBDPC); + rd32(IGC_HGORCL); + rd32(IGC_HGORCH); + rd32(IGC_HGOTCL); + rd32(IGC_HGOTCH); + rd32(IGC_LENERRS); +} + +/** + * igc_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + */ +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/** + * igc_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_copper_link(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
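igc_rar_set() earlier in this hunk packs the six MAC address bytes into a little-endian register pair and only marks the entry valid when the address is non-zero. The packing on its own, as a runnable sketch (the Address Valid bit position is illustrative):

#include <stdint.h>
#include <stdio.h>

#define RAH_AV (1u << 31)   /* illustrative "address valid" bit */

struct rar { uint32_t low, high; };

/* Byte 0 of the MAC lands in the least-significant byte of the low word. */
static struct rar pack_rar(const uint8_t mac[6])
{
        struct rar r;

        r.low  = (uint32_t)mac[0] | ((uint32_t)mac[1] << 8) |
                 ((uint32_t)mac[2] << 16) | ((uint32_t)mac[3] << 24);
        r.high = (uint32_t)mac[4] | ((uint32_t)mac[5] << 8);

        if (r.low || r.high)        /* all-zero entries stay marked invalid */
                r.high |= RAH_AV;
        return r;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        struct rar r = pack_rar(mac);
        printf("RAL=0x%08x RAH=0x%08x\n", r.low, r.high);  /* 0xaa211b00 / 0x8000ccbb */
        return 0;
}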
+ */ + ret_val = igc_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igc_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void igc_config_collision_dist(struct igc_hw *hw) +{ + u32 tctl; + + tctl = rd32(IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + wr32(IGC_TCTL, tctl); + wrfl(); +} + +/** + * igc_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + */ +s32 igc_config_fc_after_link_up(struct igc_hw *hw) +{ + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + struct igc_mac_info *mac = &hw->mac; + u16 speed, duplex; + s32 ret_val = 0; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == igc_media_type_copper) + ret_val = igc_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == igc_fc_none) || + (hw->fc.requested_mode == igc_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = igc_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return 0; +} + +/** + * igc_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + */ +s32 igc_get_auto_rd_done(struct igc_hw *hw) +{ + s32 ret_val = 0; + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -IGC_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
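The pause-resolution table spelled out in the comments of igc_config_fc_after_link_up() above boils down to four bit tests on the local advertisement and the partner's base-page ability word. A compact restatement (PAUSE in bit 10 and ASM_DIR in bit 11 follow the usual MII register 4/5 layout and are stated here as assumptions):

#include <stdint.h>
#include <stdio.h>

#define ADV_PAUSE   0x0400   /* register 4/5 bit 10: symmetric PAUSE      */
#define ADV_ASM_DIR 0x0800   /* register 4/5 bit 11: asymmetric direction */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolution as in the table: symmetric wins when both sides set PAUSE;
 * otherwise the two asymmetric rows pick tx-only or rx-only pause.
 */
static enum fc_mode resolve_fc(uint16_t local, uint16_t partner)
{
        if ((local & ADV_PAUSE) && (partner & ADV_PAUSE))
                return FC_FULL;
        if (!(local & ADV_PAUSE) && (local & ADV_ASM_DIR) &&
            (partner & ADV_PAUSE) && (partner & ADV_ASM_DIR))
                return FC_TX_PAUSE;
        if ((local & ADV_PAUSE) && (local & ADV_ASM_DIR) &&
            !(partner & ADV_PAUSE) && (partner & ADV_ASM_DIR))
                return FC_RX_PAUSE;
        return FC_NONE;
}

int main(void)
{
        printf("%d\n", resolve_fc(ADV_PAUSE, ADV_PAUSE | ADV_ASM_DIR));   /* prints 3, FC_FULL     */
        printf("%d\n", resolve_fc(ADV_ASM_DIR, ADV_PAUSE | ADV_ASM_DIR)); /* prints 2, FC_TX_PAUSE */
        return 0;
}

As the driver code shows, the symmetric result is then narrowed to rx-pause when the user only requested receive pause, and rx-pause is preferred over none whenever the local side advertised any receive capability.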
+ */ +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igc_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void igc_put_hw_semaphore(struct igc_hw *hw) +{ + u32 swsm; + + swsm = rd32(IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + wr32(IGC_SWSM, swsm); +} + +/** + * igc_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + */ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + bool ret_val = false; + u32 fwsm, factps; + u32 manc; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(IGC_FWSM); + factps = rd32(IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igc/igc_mac.h b/drivers/net/ethernet/intel/igc/igc_mac.h new file mode 100644 index 000000000000..782bc995badc --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_mac.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +#include "igc_hw.h" +#include "igc_phy.h" +#include "igc_defines.h" + +#ifndef IGC_REMOVED +#define IGC_REMOVED(a) (0) +#endif /* IGC_REMOVED */ + +/* forward declaration */ +s32 igc_disable_pcie_master(struct igc_hw *hw); +s32 igc_check_for_copper_link(struct igc_hw *hw); +s32 igc_config_fc_after_link_up(struct igc_hw *hw); +s32 igc_force_mac_fc(struct igc_hw *hw); +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); +s32 igc_setup_link(struct igc_hw *hw); +void igc_clear_hw_cntrs_base(struct igc_hw *hw); +s32 igc_get_auto_rd_done(struct igc_hw *hw); +void igc_put_hw_semaphore(struct igc_hw *hw); +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +void igc_config_collision_dist(struct igc_hw *hw); + +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex); + +bool igc_enable_mng_pass_thru(struct igc_hw *hw); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#endif diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c new file mode 100644 index 
000000000000..9d85707e8a81 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -0,0 +1,3901 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/if_vlan.h> +#include <linux/aer.h> + +#include "igc.h" +#include "igc_hw.h" + +#define DRV_VERSION "0.0.1-k" +#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" + +static int debug = -1; + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +char igc_driver_name[] = "igc"; +char igc_driver_version[] = DRV_VERSION; +static const char igc_driver_string[] = DRV_SUMMARY; +static const char igc_copyright[] = + "Copyright(c) 2018 Intel Corporation."; + +static const struct igc_info *igc_info_tbl[] = { + [board_base] = &igc_base_info, +}; + +static const struct pci_device_id igc_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, igc_pci_tbl); + +/* forward declaration */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring); +static int igc_sw_init(struct igc_adapter *); +static void igc_configure(struct igc_adapter *adapter); +static void igc_power_down_link(struct igc_adapter *adapter); +static void igc_set_default_mac_filter(struct igc_adapter *adapter); +static void igc_set_rx_mode(struct net_device *netdev); +static void igc_write_itr(struct igc_q_vector *q_vector); +static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector); +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx); +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix); +static void igc_free_q_vectors(struct igc_adapter *adapter); +static void igc_irq_disable(struct igc_adapter *adapter); +static void igc_irq_enable(struct igc_adapter *adapter); +static void igc_configure_msix(struct igc_adapter *adapter); +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi); + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +static void igc_reset(struct igc_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + hw->mac.ops.reset_hw(hw); + + if (hw->mac.ops.init_hw(hw)) + dev_err(&pdev->dev, "Hardware Error\n"); + + if (!netif_running(adapter->netdev)) + igc_power_down_link(adapter); + + igc_get_phy_info(hw); +} + +/** + * igc_power_up_link - Power up the phy/serdes link + * @adapter: address of board private structure + */ +static void igc_power_up_link(struct igc_adapter *adapter) +{ + igc_reset_phy(&adapter->hw); + + if (adapter->hw.phy.media_type == igc_media_type_copper) + igc_power_up_phy_copper(&adapter->hw); + + igc_setup_link(&adapter->hw); +} + +/** + * igc_power_down_link - Power down the phy/serdes link + * @adapter: address of board private structure + */ +static void igc_power_down_link(struct igc_adapter *adapter) +{ + if (adapter->hw.phy.media_type == igc_media_type_copper) + igc_power_down_phy_copper_base(&adapter->hw); +} + +/** + * igc_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. 
+ * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void igc_release_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. + */ +static void igc_get_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + */ +static void igc_free_tx_resources(struct igc_ring *tx_ring) +{ + igc_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igc_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void igc_free_all_tx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igc_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + union igc_adv_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGC_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igc_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_tx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igc_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * 
igc_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + */ +static int igc_setup_tx_resources(struct igc_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size = 0; + + size = sizeof(struct igc_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + dev_err(dev, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_tx_resources(struct igc_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igc_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igc_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * igc_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + */ +static void igc_clean_rx_ring(struct igc_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
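igc_setup_tx_resources() above (and its Rx counterpart just below) sizes the descriptor array as count * sizeof(descriptor) and rounds the result up to a 4 KiB multiple before calling dma_alloc_coherent(). The rounding is the usual power-of-two ALIGN idiom; a tiny stand-alone version (the 16-byte descriptor size is an assumption about the advanced descriptor layout):

#include <stddef.h>
#include <stdio.h>

/* Round x up to the next multiple of a power-of-two alignment. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        size_t desc_size = 16;               /* assumed size of one ring descriptor */
        size_t counts[] = { 256, 1000, 4096 };

        for (size_t i = 0; i < 3; i++) {
                size_t raw = counts[i] * desc_size;
                printf("%zu descriptors: %zu -> %zu bytes\n",
                       counts[i], raw, ALIGN_UP(raw, 4096));
        }
        return 0;
}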
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * igc_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_rx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igc_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igc_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +static void igc_free_rx_resources(struct igc_ring *rx_ring) +{ + igc_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igc_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void igc_free_all_rx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igc_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igc_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + */ +static int igc_setup_rx_resources(struct igc_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int size, desc_len; + + size = sizeof(struct igc_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + desc_len = sizeof(union igc_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_rx_resources(struct igc_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igc_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igc_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * igc_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. 
+ */ +static void igc_configure_rx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + union igc_adv_rx_desc *rx_desc; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + u64 rdba = ring->dma; + + /* disable the queue */ + wr32(IGC_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(IGC_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(IGC_RDBAH(reg_idx), rdba >> 32); + wr32(IGC_RDLEN(reg_idx), + ring->count * sizeof(union igc_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); + wr32(IGC_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* reset next-to- use/clean to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + /* set descriptor configuration */ + srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; + if (ring_uses_large_buffer(ring)) + srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + wr32(IGC_SRRCTL(reg_idx), srrctl); + + rxdctl |= IGC_RX_PTHRESH; + rxdctl |= IGC_RX_HTHRESH << 8; + rxdctl |= IGC_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igc_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGC_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; + + wr32(IGC_RXDCTL(reg_idx), rxdctl); +} + +/** + * igc_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx(struct igc_adapter *adapter) +{ + int i; + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igc_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + */ +static void igc_configure_tx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + u32 txdctl = 0; + + /* disable the queue */ + wr32(IGC_TXDCTL(reg_idx), 0); + wrfl(); + mdelay(10); + + wr32(IGC_TDLEN(reg_idx), + ring->count * sizeof(union igc_adv_tx_desc)); + wr32(IGC_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(IGC_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); + wr32(IGC_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGC_TX_PTHRESH; + txdctl |= IGC_TX_HTHRESH << 8; + txdctl |= IGC_TX_WTHRESH << 16; + + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + wr32(IGC_TXDCTL(reg_idx), txdctl); +} + +/** + * igc_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
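igc_configure_rx_ring() and igc_configure_tx_ring() above program a 64-bit descriptor-ring base address through a pair of 32-bit registers, low half first. The split is plain masking and shifting; in isolation:

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit bus address into the two 32-bit halves the BAL/BAH pair takes. */
static void split_dma_base(uint64_t dma, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)(dma & 0x00000000ffffffffULL);
        *hi = (uint32_t)(dma >> 32);
}

int main(void)
{
        uint32_t lo, hi;

        split_dma_base(0x0000001234567000ULL, &lo, &hi);
        printf("BAL=0x%08x BAH=0x%08x\n", lo, hi);   /* 0x34567000 / 0x00000012 */
        return 0;
}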
+ */ +static void igc_configure_tx(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igc_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + */ +static void igc_setup_mrqc(struct igc_adapter *adapter) +{ +} + +/** + * igc_setup_rctl - configure the receive control registers + * @adapter: Board private structure + */ +static void igc_setup_rctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(IGC_RCTL); + + rctl &= ~(3 << IGC_RCTL_MO_SHIFT); + rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); + + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); + + /* enable stripping of CRC. Newer features require + * that the HW strips the CRC. + */ + rctl |= IGC_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= IGC_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(IGC_RXDCTL(0), 0); + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in set_rx_mode + */ + rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ + IGC_RCTL_BAM | /* RX All Bcast Pkts */ + IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ + IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ + } + + wr32(IGC_RCTL, rctl); +} + +/** + * igc_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + */ +static void igc_setup_tctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which icould be enabled by default */ + wr32(IGC_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_CT; + tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | + (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); + + /* Enable transmits */ + tctl |= IGC_TCTL_EN; + + wr32(IGC_TCTL, tctl); +} + +/** + * igc_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int igc_set_mac(struct net_device *netdev, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igc_set_default_mac_filter(adapter); + + return 0; +} + +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) +{ +} + +static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* memory barriier comment */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igc_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! 
*/ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + if (igc_desc_unused(tx_ring) >= size) + return 0; + return __igc_maybe_stop_tx(tx_ring, size); +} + +static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS; + + return cmd_type; +} + +static void igc_tx_olinfo_status(struct igc_ring *tx_ring, + union igc_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; + + /* insert L4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * + ((IGC_TXD_POPTS_TXSM << 8) / + IGC_TX_FLAGS_CSUM); + + /* insert IPv4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * + (((IGC_TXD_POPTS_IXSM << 8)) / + IGC_TX_FLAGS_IPV4); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int igc_tx_map(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 tx_flags = first->tx_flags; + struct skb_frag_struct *frag; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + dma_addr_t dma; + u32 cmd_type = igc_tx_cmd_type(skb, tx_flags); + + tx_desc = IGC_TX_DESC(tx_ring, i); + + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGC_MAX_DATA_PER_TXD; + size -= IGC_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGC_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
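igc_tx_olinfo_status() earlier in this hunk uses a branchless idiom inherited from igb: (flags & FLAG) evaluates to either 0 or FLAG, so multiplying by (TARGET / FLAG) yields either 0 or TARGET with no conditional, as long as TARGET is an exact multiple of FLAG. A small demonstration with made-up constants:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TX_FLAG_CSUM   0x00000004u   /* made-up single-bit flag       */
#define POPTS_TXSM_SH8 0x00000100u   /* made-up target value to OR in */

/* Only valid because the target is an exact multiple of the flag bit. */
_Static_assert(POPTS_TXSM_SH8 % TX_FLAG_CSUM == 0, "ratio must be exact");

static uint32_t build_olinfo(uint32_t flags, uint32_t paylen)
{
        uint32_t olinfo = paylen << 14;   /* payload length field (assumed shift) */

        /* Branchless: 0 when the flag is clear, POPTS_TXSM_SH8 when it is set. */
        olinfo |= (flags & TX_FLAG_CSUM) * (POPTS_TXSM_SH8 / TX_FLAG_CSUM);
        return olinfo;
}

int main(void)
{
        assert(build_olinfo(0, 1) == (1u << 14));
        assert(build_olinfo(TX_FLAG_CSUM, 1) == ((1u << 14) | POPTS_TXSM_SH8));
        printf("ok\n");
        return 0;
}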
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } + + return 0; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, + struct igc_ring *tx_ring) +{ + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + struct igc_tx_buffer *first; + u32 tx_flags = 0; + unsigned short f; + u8 hdr_len = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + + if (igc_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + skb_tx_timestamp(skb); + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + igc_tx_csum(tx_ring, first); + + igc_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; +} + +static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. 
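Before touching the ring, igc_xmit_frame_ring() above estimates worst-case descriptor usage: one descriptor per maximum-sized chunk of the linear area and of each fragment, plus three extra for the context descriptor and the tail/head gap. A rough stand-alone version of that estimate (the 32 KiB per-descriptor limit is an assumption borrowed from the igb family):

#include <stdio.h>

#define MAX_DATA_PER_TXD (1u << 15)   /* assumed power-of-two per-descriptor limit */

/* Descriptors needed for one buffer of the given length. */
static unsigned int txd_use_count(unsigned int len)
{
        return (len + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
}

/* Worst case for a packet: linear part plus each fragment, plus 1 context
 * descriptor and a 2-descriptor gap, matching the comment in the driver.
 */
static unsigned int txd_needed(unsigned int headlen,
                               const unsigned int *frag_len, int nr_frags)
{
        unsigned int count = txd_use_count(headlen);

        for (int f = 0; f < nr_frags; f++)
                count += txd_use_count(frag_len[f]);
        return count + 3;
}

int main(void)
{
        unsigned int frags[2] = { 60000, 1500 };

        printf("%u descriptors\n", txd_needed(200, frags, 2));  /* 1 + 2 + 1 + 3 = 7 */
        return 0;
}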
+ */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); +} + +static inline void igc_rx_hash(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +/** + * igc_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + */ +static void igc_process_skb_fields(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + igc_rx_hash(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, + const unsigned int size) +{ + struct igc_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +/** + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + */ +static void igc_add_rx_frag(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + rx_buffer->page_offset ^= truesize; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + union igc_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(va - IGC_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, IGC_SKB_PAD); + __skb_put(skb, size); + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + union igc_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = igc_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); + if (unlikely(!skb)) + return NULL; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGC_RX_HDR_LEN) + headlen = eth_get_headlen(va, IGC_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * igc_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void igc_reuse_rx_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct igc_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. 
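With 4 KiB pages the Rx path in this hunk splits every page into two half-page buffers, and the page_offset ^= truesize statements flip between the halves so the half just handed to the stack can be backed by the other half on the next refill. The flip in isolation:

#include <stdio.h>

#define BUF_PAGE_SIZE 4096u
#define TRUESIZE      (BUF_PAGE_SIZE / 2)   /* half-page receive buffers */

int main(void)
{
        unsigned int page_offset = 0;

        /* Each use of the buffer toggles to the other half of the page. */
        for (int use = 0; use < 4; use++) {
                printf("use %d: offset %u\n", use, page_offset);
                page_offset ^= TRUESIZE;      /* 0 -> 2048 -> 0 -> 2048 ... */
        }
        return 0;
}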
+ */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static inline bool igc_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote pages */ + if (unlikely(igc_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + return false; +#else +#define IGC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(!pagecnt_bias)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igc_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool igc_is_non_eop(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGC_RX_DESC(rx_ring, ntc)); + + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igc_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ */ +static bool igc_cleanup_headers(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely((igc_test_staterr(rx_desc, + IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void igc_put_rx_buffer(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer) +{ + if (igc_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + igc_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +/** + * igc_alloc_rx_buffers - Replace used receive buffers; packet split + * @adapter: address of board private structure + */ +static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) +{ + union igc_adv_rx_desc *rx_desc; + u16 i = rx_ring->next_to_use; + struct igc_rx_buffer *bi; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGC_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igc_rx_bufsz(rx_ring); + + do { + if (!igc_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGC_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
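The comment that closes this hunk states the producer-side ordering rule: every descriptor write must be visible before the tail write that tells the hardware to fetch, which is why a wmb() sits immediately before the writel() in the refill path. A user-space analogue of that rule using C11 release/acquire ordering, offered as a sketch of the ordering idea rather than of MMIO semantics:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RING 16
static int slot[RING];            /* stands in for the descriptor ring  */
static atomic_uint tail;          /* producer's published index ("tail") */

static void *producer(void *arg)
{
        (void)arg;
        for (unsigned int i = 0; i < RING; i++) {
                slot[i] = (int)(i * 10);   /* fill the "descriptor" first */
                /* release: the slot write above is visible before the new tail */
                atomic_store_explicit(&tail, i + 1, memory_order_release);
        }
        return NULL;
}

int main(void)                     /* build: cc -pthread example.c */
{
        pthread_t t;
        unsigned int seen = 0;

        pthread_create(&t, NULL, producer, NULL);
        while (seen < RING) {
                /* acquire: pairs with the release store above */
                unsigned int avail = atomic_load_explicit(&tail, memory_order_acquire);
                while (seen < avail) {
                        printf("consumed %d\n", slot[seen]);
                        seen++;
                }
        }
        pthread_join(t, NULL);
        return 0;
}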
+ */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct igc_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = igc_desc_unused(rx_ring); + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *rx_desc; + struct igc_rx_buffer *rx_buffer; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { + igc_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igc_get_rx_buffer(rx_ring, size); + + /* retrieve a buffer from the ring */ + if (skb) + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); + else + skb = igc_construct_skb(rx_ring, rx_buffer, + rx_desc, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igc_put_rx_buffer(rx_ring, rx_buffer); + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igc_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igc_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->rx_syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + if (cleaned_count) + igc_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
IGC_SKB_PAD : 0; +} + +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igc_rx_offset(rx_ring); + bi->pagecnt_bias = 1; + + return true; +} + +/** + * igc_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + */ +static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + struct igc_ring *tx_ring = q_vector->tx.ring; + unsigned int i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGC_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + netdev_tx_completed_queue(txring_txq(tx_ring), + 
total_packets, total_bytes); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; + u64_stats_update_end(&tx_ring->tx_syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct igc_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { + /* detected Tx unit hang */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + netif_carrier_ok(tx_ring->netdev) && + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGC_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +/** + * igc_ioctl - I/O control method + * @netdev: network interface device structure + * @ifreq: frequency + * @cmd: command + */ +static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + default: + return -EOPNOTSUPP; + } +} + +/** + * igc_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + */ +static void igc_up(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i = 0; + + /* hardware has been reset, we need to reload some things */ + igc_configure(adapter); + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + if (adapter->msix_entries) + igc_configure_msix(adapter); + else + igc_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. 
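+ * The watchdog task polls link state once get_link_status is set below.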
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); +} + +/** + * igc_update_stats - Update the board statistics counters + * @adapter: board private structure + */ +static void igc_update_stats(struct igc_adapter *adapter) +{ +} + +static void igc_nfc_filter_exit(struct igc_adapter *adapter) +{ +} + +/** + * igc_down - Close the interface + * @adapter: board private structure + */ +static void igc_down(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i = 0; + + set_bit(__IGC_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + + igc_nfc_filter_exit(adapter); + + /* set trans_start so we don't get spurious watchdogs during reset */ + netif_trans_update(netdev); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); + + igc_irq_disable(adapter); + + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igc_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + + igc_clean_all_tx_rings(adapter); + igc_clean_all_rx_rings(adapter); +} + +static void igc_reinit_locked(struct igc_adapter *adapter) +{ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igc_down(adapter); + igc_up(adapter); + clear_bit(__IGC_RESETTING, &adapter->state); +} + +static void igc_reset_task(struct work_struct *work) +{ + struct igc_adapter *adapter; + + adapter = container_of(work, struct igc_adapter, reset_task); + + netdev_err(adapter->netdev, "Reset adapter\n"); + igc_reinit_locked(adapter); +} + +/** + * igc_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int igc_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igc_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igc_down(adapter); + + dev_info(&pdev->dev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igc_up(adapter); + else + igc_reset(adapter); + + clear_bit(__IGC_RESETTING, 
&adapter->state); + + return 0; +} + +/** + * igc_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. + */ +static struct net_device_stats *igc_get_stats(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__IGC_RESETTING, &adapter->state)) + igc_update_stats(adapter); + + /* only return the current stats */ + return &netdev->stats; +} + +/** + * igc_configure - configure the hardware for RX and TX + * @adapter: private board structure + */ +static void igc_configure(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i = 0; + + igc_get_hw_control(adapter); + igc_set_rx_mode(netdev); + + igc_setup_tctl(adapter); + igc_setup_mrqc(adapter); + igc_setup_rctl(adapter); + + igc_configure_tx(adapter); + igc_configure_rx(adapter); + + igc_rx_fifo_flush_base(&adapter->hw); + + /* call igc_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); + } +} + +/** + * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table + * @adapter: Pointer to adapter structure + * @index: Index of the RAR entry which need to be synced with MAC table + */ +static void igc_rar_set_index(struct igc_adapter *adapter, u32 index) +{ + u8 *addr = adapter->mac_table[index].addr; + struct igc_hw *hw = &adapter->hw; + u32 rar_low, rar_high; + + /* HW expects these to be in network order when they are plugged + * into the registers which are little endian. In order to guarantee + * that ordering we need to do an leXX_to_cpup here in order to be + * ready for the byteswap that occurs with writel + */ + rar_low = le32_to_cpup((__le32 *)(addr)); + rar_high = le16_to_cpup((__le16 *)(addr + 4)); + + /* Indicate to hardware the Address is Valid. */ + if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) { + if (is_valid_ether_addr(addr)) + rar_high |= IGC_RAH_AV; + + rar_high |= IGC_RAH_POOL_1 << + adapter->mac_table[index].queue; + } + + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igc_set_default_mac_filter(struct igc_adapter *adapter) +{ + struct igc_mac_addr *mac_table = &adapter->mac_table[0]; + + ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); + mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE; + + igc_rar_set_index(adapter, 0); +} + +/** + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
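+ * Note: at this stage of the driver the body below is still an empty stub.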
+ */ +static void igc_set_rx_mode(struct net_device *netdev) +{ +} + +/** + * igc_msix_other - msix other interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t igc_msix_other(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_hw *hw = &adapter->hw; + u32 icr = rd32(IGC_ICR); + + /* reading ICR causes bit 31 of EICR to be cleared */ + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & IGC_ICR_LSC) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + wr32(IGC_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +/** + * igc_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. + */ +static void igc_write_ivar(struct igc_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(IGC_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | IGC_IVAR_VALID) << offset; + + array_wr32(IGC_IVAR0, index, ivar); +} + +static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + int rx_queue = IGC_N0_QUEUE; + int tx_queue = IGC_N0_QUEUE; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case igc_i225: + if (rx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igc_configure_msix - Configure MSI-X hardware + * @adapter: Pointer to adapter structure + * + * igc_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + */ +static void igc_configure_msix(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i, vector = 0; + u32 tmp; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case igc_i225: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
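+ * GPIE is therefore programmed before the misc IVAR entry below.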
+ */ + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | IGC_IVAR_VALID) << 8; + + wr32(IGC_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igc_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +static irqreturn_t igc_msix_ring(int irq, void *data) +{ + struct igc_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igc_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_request_msix - Initialize MSI-X interrupts + * @adapter: Pointer to adapter structure + * + * igc_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + */ +static int igc_request_msix(struct igc_adapter *adapter) +{ + int i = 0, err = 0, vector = 0, free_vector = 0; + struct net_device *netdev = adapter->netdev; + + err = request_irq(adapter->msix_entries[vector].vector, + &igc_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igc_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igc_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igc_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igc_free_q_vector. 
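+ * The adapter's tx_ring/rx_ring pointers for this vector are cleared as well.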
+ */ +static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* if we're coming from igc_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); +} + +static void igc_reset_interrupt_capability(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } + + while (v_idx--) + igc_reset_q_vector(adapter, v_idx); +} + +/** + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: Pointer to adapter structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) +{ + igc_free_q_vectors(adapter); + igc_reset_interrupt_capability(adapter); +} + +/** + * igc_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + */ +static void igc_free_q_vectors(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igc_reset_q_vector(adapter, v_idx); + igc_free_q_vector(adapter, v_idx); + } +} + +/** + * igc_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + */ +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igc_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igc_update_phy_info(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + + igc_get_phy_info(&adapter->hw); +} + +/** + * igc_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + */ +static bool igc_has_link(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case igc_media_type_copper: + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; + default: + case igc_media_type_unknown: + break; + } + + if (hw->mac.type == igc_i225 && + hw->phy.id == I225_I_PHY_ID) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +/** + * igc_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + */ +static void igc_watchdog(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igc_watchdog_task(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, + struct igc_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, retry_count = 20; + u32 connsw; + u32 link; + int i; + + link = igc_has_link(adapter); + + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGC_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == igc_media_type_copper) { + connsw = rd32(IGC_CONNSW); + if (!(connsw & IGC_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(IGC_CTRL); + /* Link status message must follow this format */ + netdev_info(netdev, + "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & IGC_CTRL_TFCE) && + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : + (ctrl & IGC_CTRL_RFCE) ? "RX" : + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + + /* check if SmartSpeed worked */ + igc_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? 
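+ * (100 Mbps currently keeps the default factor of 1.)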
*/ + break; + } + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); + } + } else { + dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Links status message must follow this format */ + netdev_info(netdev, "igc: %s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGC_FLAG_MAS_ENABLE) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGC_FLAG_MAS_ENABLE)) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(IGC_EICS, eics); + } else { + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } + + /* Reset the timer */ + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +/** + * igc_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igc_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. 
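+ * The new value takes effect on the next interrupt via igc_write_itr().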
+ * NOTE: This function is called only when operating in a multiqueue + * receive environment. + */ +static void igc_update_ring_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. + */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGC_4K_ITR; + goto set_itr_val; + default: + break; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if (avg_wire_size > 300 && avg_wire_size < 1200) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGC_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGC_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +/** + * igc_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
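+ * The result is stored in ring_container->itr and consumed by igc_set_itr().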
+ */ +static void igc_update_itr(struct igc_q_vector *q_vector, + struct igc_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes / packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +/** + * igc_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr_msi(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(IGC_ICR); + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = 1; + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No + * need for the IMC write + */ + u32 icr = rd32(IGC_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & IGC_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igc_set_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + current_itr = 0; + new_itr = IGC_4K_ITR; + goto set_itr_now; + default: + break; + } + + igc_update_itr(q_vector, &q_vector->tx); + igc_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
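+ * igc_write_itr() performs that deferred register write.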
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igc_ring_irq_enable(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igc_set_itr(q_vector); + else + igc_update_ring_itr(q_vector); + } + + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(IGC_EIMS, q_vector->eims_value); + else + igc_irq_enable(adapter); + } +} + +/** + * igc_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + */ +static int igc_poll(struct napi_struct *napi, int budget) +{ + struct igc_q_vector *q_vector = container_of(napi, + struct igc_q_vector, + napi); + bool clean_complete = true; + int work_done = 0; + + if (q_vector->tx.ring) + clean_complete = igc_clean_tx_irq(q_vector, budget); + + if (q_vector->rx.ring) { + int cleaned = igc_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* If not enough Rx work done, exit the polling mode */ + napi_complete_done(napi, work_done); + igc_ring_irq_enable(q_vector); + + return 0; +} + +/** + * igc_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: Pointer to adapter structure + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix) +{ + int numvecs, i; + int err; + + if (!msix) + goto msi_only; + adapter->flags |= IGC_FLAG_HAS_MSIX; + + /* Number of supported queues. 
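+ * Rx and Tx queue counts both default to adapter->rss_queues.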
*/ + adapter->num_rx_queues = adapter->rss_queues; + + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) + return; + + /* populate entry values */ + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + igc_reset_interrupt_capability(adapter); + +msi_only: + adapter->flags &= ~IGC_FLAG_HAS_MSIX; + + adapter->rss_queues = 1; + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGC_FLAG_HAS_MSI; +} + +static void igc_add_ring(struct igc_ring *ring, + struct igc_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igc_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
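+ * The rings served by this vector are carved out of the same allocation.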
+ */ +static int igc_alloc_q_vector(struct igc_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct igc_q_vector *q_vector; + struct igc_ring *ring; + int ring_count, size; + + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = sizeof(struct igc_q_vector) + + (sizeof(struct igc_ring) * ring_count); + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + else + memset(q_vector, 0, size); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igc_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); + q_vector->itr_val = IGC_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igc_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igc_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + +/** + * igc_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
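+ * Remaining Rx and Tx rings are spread evenly across the vectors.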
+ */ +static int igc_alloc_q_vectors(struct igc_adapter *adapter) +{ + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igc_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igc_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + */ +static void igc_cache_ring_register(struct igc_adapter *adapter) +{ + int i = 0, j = 0; + + switch (adapter->hw.mac.type) { + case igc_i225: + /* Fall through */ + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = j; + break; + } +} + +/** + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: Pointer to adapter structure + * + * This function initializes the interrupts and allocates all of the queues. 
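+ * On failure the acquired interrupt capability is released again.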
+ */ +static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + igc_set_interrupt_capability(adapter, msix); + + err = igc_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igc_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igc_reset_interrupt_capability(adapter); + return err; +} + +static void igc_free_irq(struct igc_adapter *adapter) +{ + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void igc_irq_disable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 regval = rd32(IGC_EIAM); + + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); + wr32(IGC_EIMC, adapter->eims_enable_mask); + regval = rd32(IGC_EIAC); + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(IGC_IAM, 0); + wr32(IGC_IMC, ~0); + wrfl(); + + if (adapter->msix_entries) { + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * igc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void igc_irq_enable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; + u32 regval = rd32(IGC_EIAC); + + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(IGC_EIAM); + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); + wr32(IGC_EIMS, adapter->eims_enable_mask); + wr32(IGC_IMS, ims); + } else { + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + } +} + +/** + * igc_request_irq - initialize interrupts + * @adapter: Pointer to adapter structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
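+ * MSI-X is tried first, then MSI, then legacy interrupts.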
+ */ +static int igc_request_irq(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + err = igc_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + igc_clear_interrupt_scheme(adapter); + err = igc_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + igc_setup_all_tx_resources(adapter); + igc_setup_all_rx_resources(adapter); + igc_configure(adapter); + } + + igc_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGC_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, &igc_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igc_reset_interrupt_capability(adapter); + adapter->flags &= ~IGC_FLAG_HAS_MSI; + } + + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + dev_err(&pdev->dev, "Error %d getting interrupt\n", + err); + +request_done: + return err; +} + +static void igc_write_itr(struct igc_q_vector *q_vector) +{ + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = IGC_ITR_VAL_MASK; + + itr_val |= IGC_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +/** + * igc_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int __igc_open(struct net_device *netdev, bool resuming) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int err = 0; + int i = 0; + + /* disallow open during test */ + + if (test_bit(__IGC_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igc_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igc_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igc_power_up_link(adapter); + + igc_configure(adapter); + + err = igc_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + netif_tx_start_all_queues(netdev); + + /* start the watchdog. 
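+ * As in igc_up(), link detection is left to the watchdog task.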
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + return IGC_SUCCESS; + +err_set_queues: + igc_free_irq(adapter); +err_req_irq: + igc_release_hw_control(adapter); + igc_power_down_link(adapter); + igc_free_all_rx_resources(adapter); +err_setup_rx: + igc_free_all_tx_resources(adapter); +err_setup_tx: + igc_reset(adapter); + + return err; +} + +static int igc_open(struct net_device *netdev) +{ + return __igc_open(netdev, false); +} + +/** + * igc_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int __igc_close(struct net_device *netdev, bool suspending) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + + igc_down(adapter); + + igc_release_hw_control(adapter); + + igc_free_irq(adapter); + + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + return 0; +} + +static int igc_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igc_close(netdev, false); + return 0; +} + +static const struct net_device_ops igc_netdev_ops = { + .ndo_open = igc_open, + .ndo_stop = igc_close, + .ndo_start_xmit = igc_xmit_frame, + .ndo_set_mac_address = igc_set_mac, + .ndo_change_mtu = igc_change_mtu, + .ndo_get_stats = igc_get_stats, + .ndo_do_ioctl = igc_ioctl, +}; + +/* PCIe configuration access */ +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + u16 cap_offset; + + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); + if (!cap_offset) + return -IGC_ERR_CONFIG; + + pci_read_config_word(adapter->pdev, cap_offset + reg, value); + + return IGC_SUCCESS; +} + +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + u16 cap_offset; + + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); + if (!cap_offset) + return -IGC_ERR_CONFIG; + + pci_write_config_word(adapter->pdev, cap_offset + reg, *value); + + return IGC_SUCCESS; +} + +u32 igc_rd32(struct igc_hw *hw, u32 reg) +{ + struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (IGC_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igc->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + } + + return value; +} + +/** + * igc_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igc_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igc_probe initializes an adapter identified by a pci_dev structure. 
+ * The OS initialization, configuring the adapter private structure, + * and a hardware reset occur. + */ +static int igc_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct igc_adapter *adapter; + struct net_device *netdev; + struct igc_hw *hw; + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; + int err, pci_using_dac; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(64)); + if (!err) + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + IGC_ERR("Wrong DMA configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_selected_regions(pdev, + pci_select_bars(pdev, + IORESOURCE_MEM), + igc_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), + IGC_MAX_TX_QUEUES); + + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->port_num = hw->bus.func; + adapter->msg_enable = GENMASK(debug - 1, 0); + + err = pci_save_state(pdev); + if (err) + goto err_ioremap; + + err = -EIO; + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!adapter->io_addr) + goto err_ioremap; + + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igc_netdev_ops; + + netdev->watchdog_timeo = 5 * HZ; + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* setup the private structure */ + err = igc_sw_init(adapter); + if (err) + goto err_sw_init; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* configure RXPBSIZE and TXPBSIZE */ + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igc_reset_task); + 
INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0xaf; + + hw->fc.requested_mode = igc_fc_default; + hw->fc.current_mode = igc_fc_default; + + /* reset the hardware with the new settings */ + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + + strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + + /* print pcie link status and MAC address */ + pcie_print_link_status(pdev); + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + + return 0; + +err_register: + igc_release_hw_control(adapter); +err_eeprom: + if (!igc_check_reset_block(hw)) + igc_reset_phy(hw); +err_sw_init: + igc_clear_interrupt_scheme(adapter); + iounmap(adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igc_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igc_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void igc_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + unregister_netdev(netdev); + + igc_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, adapter->io_addr); + pci_release_mem_regions(pdev); + + kfree(adapter->mac_table); + kfree(adapter->shadow_vfta); + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +static struct pci_driver igc_driver = { + .name = igc_driver_name, + .id_table = igc_pci_tbl, + .probe = igc_probe, + .remove = igc_remove, +}; + +static void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) +{ + /* Determine if we need to pair queues. */ + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; +} + +static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +{ + unsigned int max_rss_queues; + + /* Determine the maximum number of RSS queues supported. 
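/*
 * Editor's sketch (illustrative only, not part of this patch): the
 * queue-pairing decision made by igc_set_flag_queue_pairs() above. MSI-X
 * vectors are a limited resource, so once more than half of the supported
 * RSS queues are in use the driver pairs each Tx queue with an Rx queue on a
 * shared vector instead of giving every queue its own interrupt. Names are
 * local stand-ins; the value 4 below is an assumption used only to make the
 * example concrete (i225 parts expose four queue pairs).
 */
#include <stdbool.h>

static bool igc_wants_queue_pairs(unsigned int rss_queues,
				  unsigned int max_rss_queues)
{
	/* pair Tx/Rx on one vector when over half of the queues are in use */
	return rss_queues > max_rss_queues / 2;
}

/*
 * Example: with max_rss_queues = 4, configuring 1 or 2 RSS queues keeps
 * unpaired vectors, while 3 or 4 queues turns IGC_FLAG_QUEUE_PAIRS on.
 */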
*/ + max_rss_queues = IGC_MAX_RX_QUEUES; + + return max_rss_queues; +} + +static void igc_init_queue_configuration(struct igc_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igc_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igc_set_flag_queue_pairs(adapter, max_rss_queues); +} + +/** + * igc_sw_init - Initialize general software structures (struct igc_adapter) + * @adapter: board private structure to initialize + * + * igc_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int igc_sw_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGC_DEFAULT_TXD; + adapter->rx_ring_count = IGC_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGC_DEFAULT_ITR; + adapter->tx_itr_setting = IGC_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; + + /* adjust max frame to be at least the size of a standard frame */ + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->nfc_lock); + spin_lock_init(&adapter->stats64_lock); + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGC_FLAG_HAS_MSIX; + + adapter->mac_table = kzalloc(size, GFP_ATOMIC); + if (!adapter->mac_table) + return -ENOMEM; + + igc_init_queue_configuration(adapter); + + /* This call may decrease the number of queues */ + if (igc_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igc_irq_disable(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + return 0; +} + +/** + * igc_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + */ +struct net_device *igc_get_hw_dev(struct igc_hw *hw) +{ + struct igc_adapter *adapter = hw->back; + + return adapter->netdev; +} + +/** + * igc_init_module - Driver Registration Routine + * + * igc_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init igc_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", + igc_driver_string, igc_driver_version); + + pr_info("%s\n", igc_copyright); + + ret = pci_register_driver(&igc_driver); + return ret; +} + +module_init(igc_init_module); + +/** + * igc_exit_module - Driver Exit Cleanup Routine + * + * igc_exit_module is called just before the driver is removed + * from memory. 
+ */ +static void __exit igc_exit_module(void) +{ + pci_unregister_driver(&igc_driver); +} + +module_exit(igc_exit_module); +/* igc_main.c */ diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c new file mode 100644 index 000000000000..58f81aba0144 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_nvm.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_mac.h" +#include "igc_nvm.h" + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = rd32(IGC_EERD); + else + reg = rd32(IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igc_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +s32 igc_acquire_nvm(struct igc_hw *hw) +{ + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + u32 eecd = rd32(IGC_EECD); + s32 ret_val = 0; + + wr32(IGC_EECD, eecd | IGC_EECD_REQ); + eecd = rd32(IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + udelay(5); + eecd = rd32(IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -IGC_ERR_NVM; + } + + return ret_val; +} + +/** + * igc_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + */ +void igc_release_nvm(struct igc_hw *hw) +{ + u32 eecd; + + eecd = rd32(IGC_EECD); + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + */ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
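/*
 * Editor's sketch (illustrative only, not part of this patch): the EERD
 * handshake used by igc_poll_eerd_eewr_done() above and igc_read_nvm_eerd()
 * below. Software writes the word address plus a START bit into EERD, polls
 * the same register until DONE, then takes the 16-bit word from the upper
 * half. The bit positions used here (address shift of 2, START 0x1, DONE 0x2,
 * data in bits 31:16) follow the e1000-family layout and should be read as
 * assumptions; rd/wr are caller-supplied accessors for the EERD register so
 * the sketch stays self-contained.
 */
#include <stdint.h>

#define EX_NVM_RW_ADDR_SHIFT	2
#define EX_NVM_RW_REG_START	0x00000001u
#define EX_NVM_RW_REG_DONE	0x00000002u
#define EX_NVM_RW_REG_DATA	16

/* Returns 0 and stores one NVM word in *data, or -1 on timeout. */
static int ex_eerd_read_word(uint32_t (*rd)(void *ctx),
			     void (*wr)(void *ctx, uint32_t val),
			     void *ctx, uint16_t offset, uint16_t *data)
{
	uint32_t reg;
	int i;

	wr(ctx, ((uint32_t)offset << EX_NVM_RW_ADDR_SHIFT) |
		EX_NVM_RW_REG_START);

	for (i = 0; i < 100000; i++) {	/* the driver polls 100000 x 5us */
		reg = rd(ctx);
		if (reg & EX_NVM_RW_REG_DONE) {
			*data = (uint16_t)(reg >> EX_NVM_RW_REG_DATA);
			return 0;
		}
	}

	return -1;
}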
+ */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + wr32(IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igc_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + */ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(IGC_RAH(0)); + rar_low = rd32(IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igc_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val = 0; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
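/*
 * Editor's sketch (illustrative only, not part of this patch): the checksum
 * rule enforced by igc_validate_nvm_checksum() above and applied by
 * igc_update_nvm_checksum() below. The checksum word is chosen so that the
 * first block of NVM words sums, modulo 2^16, to 0xBABA (NVM_SUM). The offset
 * 0x3F used here mirrors the customary Intel NVM_CHECKSUM_REG and is an
 * assumption; the plain array stands in for the nvm.ops.read/write hooks.
 */
#include <stdint.h>

#define EX_NVM_SUM		0xBABA
#define EX_NVM_CHECKSUM_REG	0x003F

/* Returns 0 when words[0..EX_NVM_CHECKSUM_REG] sum to EX_NVM_SUM. */
static int ex_nvm_checksum_valid(const uint16_t words[EX_NVM_CHECKSUM_REG + 1])
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < EX_NVM_CHECKSUM_REG + 1; i++)
		sum += words[i];

	return sum == EX_NVM_SUM ? 0 : -1;
}

/* Rewrites the checksum word so that the block sums to EX_NVM_SUM again. */
static void ex_nvm_checksum_update(uint16_t words[EX_NVM_CHECKSUM_REG + 1])
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < EX_NVM_CHECKSUM_REG; i++)
		sum += words[i];

	words[EX_NVM_CHECKSUM_REG] = (uint16_t)(EX_NVM_SUM - sum);
}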
+ */ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h new file mode 100644 index 000000000000..f9fc2e9cfb03 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_nvm.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +s32 igc_acquire_nvm(struct igc_hw *hw); +void igc_release_nvm(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); + +#endif diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c new file mode 100644 index 000000000000..38e43e6fc1c7 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -0,0 +1,791 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_phy.h" + +/* forward declaration */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw); +static s32 igc_wait_autoneg(struct igc_hw *hw); + +/** + * igc_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return IGC_ERR_BLK_PHY_RESET (12). + */ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + u32 manc; + + manc = rd32(IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? + IGC_ERR_BLK_PHY_RESET : 0; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + */ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usleep_range(200, 500); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igc_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + u16 i, phy_status; + s32 ret_val = 0; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + */ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + */ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + + /* Temporary workaround - should be removed when PHY will implement + * IEEE registers as properly + */ + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + usleep_range(1000, 2000); +} + +/** + * igc_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + */ +s32 igc_check_downshift(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, offset, mask; + s32 ret_val; + + switch (phy->type) { + case igc_phy_i225: + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + ret_val = 0; + goto out; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? true : false; + +out: + return ret_val; +} + +/** + * igc_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). 
+ */ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = igc_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + ctrl = rd32(IGC_CTRL); + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(IGC_CTRL, ctrl); + wrfl(); + + usleep_range(1500, 2000); + + phy->ops.release(hw); + +out: + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. 
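/*
 * Editor's sketch (illustrative only, not part of this patch): the double
 * read of PHY_STATUS used by igc_phy_has_link() and igc_wait_autoneg() above.
 * The MII link-status bit is latched low ("sticky"), so a first read can
 * still report a link failure that has since cleared; only the second read
 * reflects the current state. The bit value is the standard MII status bit 2;
 * read_status is a caller-supplied stand-in for phy.ops.read_reg.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_MII_SR_LINK_STATUS	0x0004	/* link status, latched low */

static bool ex_phy_link_up(uint16_t (*read_status)(void *ctx), void *ctx)
{
	uint16_t status;

	(void)read_status(ctx);		/* discard the latched value */
	status = read_status(ctx);	/* current link state */

	return (status & EX_MII_SR_LINK_STATUS) != 0;
}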
+ */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 aneg_multigbt_an_ctrl = 0; + u16 mii_1000t_ctrl_reg = 0; + u16 mii_autoneg_adv_reg; + s32 ret_val; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && + hw->phy.id == I225_I_PHY_ID) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + hw_dbg("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. 
If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && + hw->phy.id == I225_I_PHY_ID) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). + */ +s32 igc_setup_copper_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. 
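/*
 * Editor's sketch (illustrative only, not part of this patch): the mapping
 * from the requested flow-control mode to the PAUSE/ASM_DIR advertisement
 * bits, as implemented by the switch above. "Rx pause only" and "full"
 * advertise the same bit pair; the asymmetry is resolved after autoneg (in
 * igc_config_fc_after_link_up) by disabling local transmission of pause
 * frames. The bit values mirror the standard MII advertisement register
 * layout behind the driver's NWAY_AR_* names and are assumptions here; the
 * driver also rejects an unknown mode with -IGC_ERR_CONFIG rather than
 * falling through.
 */
#include <stdint.h>

#define EX_NWAY_AR_PAUSE	0x0400	/* symmetric pause capable */
#define EX_NWAY_AR_ASM_DIR	0x0800	/* asymmetric pause direction */

enum ex_fc_mode { EX_FC_NONE, EX_FC_RX_PAUSE, EX_FC_TX_PAUSE, EX_FC_FULL };

static uint16_t ex_fc_to_adv_bits(enum ex_fc_mode mode, uint16_t adv)
{
	adv &= ~(EX_NWAY_AR_ASM_DIR | EX_NWAY_AR_PAUSE);

	switch (mode) {
	case EX_FC_RX_PAUSE:
	case EX_FC_FULL:
		adv |= EX_NWAY_AR_ASM_DIR | EX_NWAY_AR_PAUSE;
		break;
	case EX_FC_TX_PAUSE:
		adv |= EX_NWAY_AR_ASM_DIR;
		break;
	case EX_FC_NONE:
	default:
		break;
	}

	return adv;
}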
+ */ + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igc_config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + */ +static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + usleep_range(500, 1000); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + *data = (u16)mdic; + +out: + return ret_val; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + */ +static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. 
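/*
 * Editor's sketch (illustrative only, not part of this patch): the MDIC
 * access pattern used by igc_read_phy_reg_mdic() above and its write
 * counterpart just below. The MAC proxies MDIO for software: one MDIC word
 * carries the register offset, PHY address and opcode (plus data for writes),
 * and software then polls the same register for READY while watching ERROR.
 * The bit layout used here follows the e1000 family and should be read as an
 * assumption; rd/wr in ex_mdic_ops are caller-supplied accessors for the MDIC
 * register.
 */
#include <stdint.h>

#define EX_MDIC_DATA_MASK	0x0000FFFFu
#define EX_MDIC_REG_SHIFT	16
#define EX_MDIC_PHY_SHIFT	21
#define EX_MDIC_OP_READ		0x08000000u
#define EX_MDIC_READY		0x10000000u
#define EX_MDIC_ERROR		0x40000000u

struct ex_mdic_ops {
	uint32_t (*rd)(void *ctx);		/* read the MDIC register */
	void (*wr)(void *ctx, uint32_t val);	/* write the MDIC register */
	void *ctx;
};

/* Returns 0 and stores the PHY register value in *data, -1 otherwise. */
static int ex_mdic_read(const struct ex_mdic_ops *ops, uint32_t phy_addr,
			uint32_t offset, uint16_t *data)
{
	uint32_t mdic = 0;
	int i;

	ops->wr(ops->ctx, (offset << EX_MDIC_REG_SHIFT) |
			  (phy_addr << EX_MDIC_PHY_SHIFT) |
			  EX_MDIC_OP_READ);

	for (i = 0; i < 1000; i++) {	/* bounded poll; the driver also sleeps */
		mdic = ops->rd(ops->ctx);
		if (mdic & EX_MDIC_READY)
			break;
	}

	if (!(mdic & EX_MDIC_READY) || (mdic & EX_MDIC_ERROR))
		return -1;

	*data = (uint16_t)(mdic & EX_MDIC_DATA_MASK);
	return 0;
}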
+ */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + usleep_range(500, 1000); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + */ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + */ +static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 *data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + */ +static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + if (ret_val) + return ret_val; + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. 
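/*
 * Editor's sketch (illustrative only, not part of this patch): how
 * igc_write_phy_reg_gpy() above and igc_read_phy_reg_gpy() below interpret
 * their 'offset' argument. The lower half carries the register number and the
 * upper half the MMD device address; device 0 means a plain Clause 22 MDIC
 * access, anything else goes through the indirect MMD access registers
 * (IGC_MMDAC/IGC_MMDAAD, registers 13 and 14). The mask and shift values
 * below stand in for the driver's GPY_REG_MASK/GPY_MMD_MASK/GPY_MMD_SHIFT and
 * are assumptions.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_GPY_REG_MASK		0x0000FFFFu
#define EX_GPY_MMD_MASK		0xFFFF0000u
#define EX_GPY_MMD_SHIFT	16

struct ex_gpy_addr {
	uint8_t  dev_addr;	/* MMD device, 0 = direct MDIC access */
	uint16_t reg;		/* register within that device */
	bool     use_xmdio;	/* true when the MMD indirection is needed */
};

static struct ex_gpy_addr ex_gpy_decode(uint32_t offset)
{
	struct ex_gpy_addr a;

	a.dev_addr  = (uint8_t)((offset & EX_GPY_MMD_MASK) >> EX_GPY_MMD_SHIFT);
	a.reg       = (uint16_t)(offset & EX_GPY_REG_MASK);
	a.use_xmdio = a.dev_addr != 0;

	return a;
}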
+ * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + if (ret_val) + return ret_val; + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h new file mode 100644 index 000000000000..25cba33de7e2 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_phy.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +#include "igc_mac.h" + +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 igc_check_downshift(struct igc_hw *hw); +s32 igc_setup_copper_link(struct igc_hw *hw); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); + +#endif diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h new file mode 100644 index 000000000000..a1bd3216c906 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ + +/* Internal Packet Buffer Size Registers */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ + +/* Flow Control Register Descriptions */ +#define IGC_FCAL 0x00028 /* FC Address Low - RW */ +#define IGC_FCAH 0x0002C /* FC Address High - RW */ +#define IGC_FCT 0x00030 /* FC Type - RW */ +#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ +#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ +#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ +#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ +#define IGC_FCSTS 0x02464 /* FC Status - RO */ + +/* PCIe Register Description */ +#define IGC_GCR 0x05B00 /* PCIe control- RW */ + +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ + +/* Function Active and Power State to MNG */ 
+#define IGC_FACTPS 0x05B30 + +/* Interrupt Register Description */ +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. Interrupt Auto Mask - RW */ +#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +/* Intr Throttle - RW */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +/* Interrupt Vector Allocation - RW */ +#define IGC_IVAR0 0x01700 +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ + +/* Interrupt Cause */ +#define IGC_ICRXPTC 0x04104 /* Rx Packet Timer Expire Count */ +#define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */ +#define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */ +#define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */ +#define IGC_ICTXQEC 0x04118 /* Tx Queue Empty Count */ +#define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */ +#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */ +#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */ + +#define IGC_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define IGC_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host */ +#define IGC_HGPTC 0x04118 /* Host Good Packets TX Count */ +#define IGC_HTCBDPC 0x04124 /* Host TX Circ.Breaker Drop Count */ + +/* MSI-X Table Register Descriptions */ +#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ + +/* Receive Register Descriptions */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) +#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) +#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) +#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) +#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) +#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) +#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) +#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) + +/* Transmit Register Descriptions */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ +#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) +#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) +#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) +#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) +#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) +#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +/* MMD Register Descriptions */ +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Good transmitted packets counter registers */ +#define IGC_PQGPTC(_n) (0x010014 + (0x100 * (_n))) + +/* Statistics 
Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define IGC_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_DC 0x04030 /* Defer Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define IGC_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast 
Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +#define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define IGC_LENERRS 0x04138 /* Length Errors Count */ +#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ + +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ + +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 + +/* forward declaration */ +struct igc_hw; +u32 igc_rd32(struct igc_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igc_rd32(hw, reg)) + +#define wrfl() ((void)rd32(IGC_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) + +#endif diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 7722153c4ac2..1d4d1686909a 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -102,7 +102,7 @@ static struct pci_driver ixgb_driver = { MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 5414685189ce..ca6b0c458e4a 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ + ixgbe_xsk.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4fc906c6166b..7a7679e7be84 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -228,13 +228,17 @@ struct ixgbe_tx_buffer { struct ixgbe_rx_buffer { struct sk_buff *skb; dma_addr_t dma; - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; -#else 
- __u16 page_offset; -#endif - __u16 pagecnt_bias; + union { + struct { + struct page *page; + __u32 page_offset; + __u16 pagecnt_bias; + }; + struct { + void *addr; + u64 handle; + }; + }; }; struct ixgbe_queue_stats { @@ -271,6 +275,7 @@ enum ixgbe_ring_state_t { __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, __IXGBE_TX_XDP_RING, + __IXGBE_TX_DISABLED, }; #define ring_uses_build_skb(ring) \ @@ -347,6 +352,10 @@ struct ixgbe_ring { struct ixgbe_rx_queue_stats rx_stats; }; struct xdp_rxq_info xdp_rxq; + struct xdp_umem *xsk_umem; + struct zero_copy_allocator zca; /* ZC allocator anchor */ + u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */ + u16 rx_buf_len; } ____cacheline_internodealigned_in_smp; enum ixgbe_ring_f_enum { @@ -605,6 +614,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_EEE_ENABLED BIT(15) #define IXGBE_FLAG2_RX_LEGACY BIT(16) #define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) +#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18) /* Tx fast path data */ int num_tx_queues; @@ -763,6 +773,11 @@ struct ixgbe_adapter { #ifdef CONFIG_XFRM_OFFLOAD struct ixgbe_ipsec *ipsec; #endif /* CONFIG_XFRM_OFFLOAD */ + + /* AF_XDP zero-copy */ + struct xdp_umem **xsk_umems; + u16 num_xsk_umems_used; + u16 num_xsk_umems; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -1003,15 +1018,24 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, struct sk_buff *skb); int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, struct ixgbe_ipsec_tx_data *itd); +void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf); +int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf); +int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf); #else -static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }; -static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }; -static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }; +static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { } +static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { } +static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { } static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) { }; + struct sk_buff *skb) { } static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - struct ixgbe_ipsec_tx_data *itd) { return 0; }; + struct ixgbe_ipsec_tx_data *itd) { return 0; } +static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, + u32 vf) { } +static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, + u32 *mbuf, u32 vf) { return -EACCES; } +static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, + u32 *mbuf, u32 vf) { return -EACCES; } #endif /* CONFIG_XFRM_OFFLOAD */ #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e5a8461fe6a9..732b1e6ecc43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -136,6 +136,8 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0) "legacy-rx", +#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1) + "vf-ipsec", }; #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) @@ -3409,6 +3411,9 @@ static u32 
ixgbe_get_priv_flags(struct net_device *netdev) if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; + if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED) + priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN; + return priv_flags; } @@ -3421,6 +3426,10 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) flags2 |= IXGBE_FLAG2_RX_LEGACY; + flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED; + if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN) + flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED; + if (flags2 != adapter->flags2) { adapter->flags2 = flags2; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index da4322e4daed..fd1b0546fd67 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -5,6 +5,11 @@ #include <net/xfrm.h> #include <crypto/aead.h> +#define IXGBE_IPSEC_KEY_BITS 160 +static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + +static void ixgbe_ipsec_del_sa(struct xfrm_state *xs); + /** * ixgbe_ipsec_set_tx_sa - set the Tx SA registers * @hw: hw specific details @@ -113,7 +118,6 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[]) **/ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) { - struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; u32 buf[4] = {0, 0, 0, 0}; u16 idx; @@ -132,9 +136,6 @@ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); } - - ipsec->num_rx_sa = 0; - ipsec->num_tx_sa = 0; } /** @@ -290,6 +291,13 @@ static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter) /** * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset * @adapter: board private structure + * + * Reload the HW tables from the SW tables after they've been bashed + * by a chip reset. + * + * Any VF entries are removed from the SW and HW tables since either + * (a) the VF also gets reset on PF reset and will ask again for the + * offloads, or (b) the VF has been removed by a change in the num_vfs. 
**/ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { @@ -305,6 +313,28 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) ixgbe_ipsec_clear_hw_tables(adapter); ixgbe_ipsec_start_engine(adapter); + /* reload the Rx and Tx keys */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + struct rx_sa *r = &ipsec->rx_tbl[i]; + struct tx_sa *t = &ipsec->tx_tbl[i]; + + if (r->used) { + if (r->mode & IXGBE_RXTXMOD_VF) + ixgbe_ipsec_del_sa(r->xs); + else + ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi, + r->key, r->salt, + r->mode, r->iptbl_ind); + } + + if (t->used) { + if (t->mode & IXGBE_RXTXMOD_VF) + ixgbe_ipsec_del_sa(t->xs); + else + ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt); + } + } + /* reload the IP addrs */ for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) { struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i]; @@ -312,20 +342,6 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) if (ipsa->used) ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr); } - - /* reload the Rx and Tx keys */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { - struct rx_sa *rsa = &ipsec->rx_tbl[i]; - struct tx_sa *tsa = &ipsec->tx_tbl[i]; - - if (rsa->used) - ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi, - rsa->key, rsa->salt, - rsa->mode, rsa->iptbl_ind); - - if (tsa->used) - ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt); - } } /** @@ -382,6 +398,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, rcu_read_lock(); hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, (__force u32)spi) { + if (rsa->mode & IXGBE_RXTXMOD_VF) + continue; if (spi == rsa->xs->id.spi && ((ip4 && *daddr == rsa->xs->id.daddr.a4) || (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, @@ -411,7 +429,6 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, struct net_device *dev = xs->xso.dev; unsigned char *key_data; char *alg_name = NULL; - const char aes_gcm_name[] = "rfc4106(gcm(aes))"; int key_len; if (!xs->aead) { @@ -439,9 +456,9 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, * we don't need to do any byteswapping. 
* 160 accounts for 16 byte key and 4 byte salt */ - if (key_len == 160) { + if (key_len == IXGBE_IPSEC_KEY_BITS) { *mysalt = ((u32 *)key_data)[4]; - } else if (key_len != 128) { + } else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) { netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); return -EINVAL; } else { @@ -676,6 +693,9 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) } else { struct tx_sa tsa; + if (adapter->num_vfs) + return -EOPNOTSUPP; + /* find the first unused index */ ret = ixgbe_ipsec_find_empty_idx(ipsec, false); if (ret < 0) { @@ -811,6 +831,226 @@ static const struct xfrmdev_ops ixgbe_xfrmdev_ops = { }; /** + * ixgbe_ipsec_vf_clear - clear the tables of data for a VF + * @adapter: board private structure + * @vf: VF id to be removed + **/ +void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + int i; + + /* search rx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) { + if (!ipsec->rx_tbl[i].used) + continue; + if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF && + ipsec->rx_tbl[i].vf == vf) + ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs); + } + + /* search tx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) { + if (!ipsec->tx_tbl[i].used) + continue; + if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF && + ipsec->tx_tbl[i].vf == vf) + ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs); + } +} + +/** + * ixgbe_ipsec_vf_add_sa - translate VF request to SA add + * @adapter: board private structure + * @msgbuf: The message buffer + * @vf: the VF index + * + * Make up a new xs and algorithm info from the data sent by the VF. + * We only need to sketch in just enough to set up the HW offload. + * Put the resulting offload_handle into the return message to the VF. + * + * Returns 0 or error value + **/ +int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct xfrm_algo_desc *algo; + struct sa_mbx_msg *sam; + struct xfrm_state *xs; + size_t aead_len; + u16 sa_idx; + u32 pfsa; + int err; + + sam = (struct sa_mbx_msg *)(&msgbuf[1]); + if (!adapter->vfinfo[vf].trusted || + !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) { + e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf); + err = -EACCES; + goto err_out; + } + + /* Tx IPsec offload doesn't seem to work on this + * device, so block these requests for now. 
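/*
 * Editor's sketch (illustrative only, not part of this patch): the key/salt
 * split performed in ixgbe_ipsec_parse_proto_keys() above. The xfrm AEAD key
 * blob for rfc4106(gcm(aes)) is either 128 bits (key only, salt of 0) or
 * IXGBE_IPSEC_KEY_BITS = 160 bits (a 16 byte key followed by a 4 byte salt);
 * any other length is rejected. Names here are local stand-ins and the
 * key-copy step is inferred from the surrounding context rather than shown in
 * the hunk above.
 */
#include <stdint.h>
#include <string.h>

#define EX_IPSEC_KEY_BITS	160

/* Returns 0 and fills key[16] and *salt (native-endian), -1 on bad length. */
static int ex_split_key_salt(const uint8_t *key_data, int key_len_bits,
			     uint8_t key[16], uint32_t *salt)
{
	if (key_len_bits != EX_IPSEC_KEY_BITS && key_len_bits != 128)
		return -1;	/* only a 128-bit key, optionally plus salt */

	memcpy(key, key_data, 16);

	if (key_len_bits == EX_IPSEC_KEY_BITS)
		memcpy(salt, key_data + 16, sizeof(*salt));
	else
		*salt = 0;

	return 0;
}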
+ */ + if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) { + err = -EOPNOTSUPP; + goto err_out; + } + + xs = kzalloc(sizeof(*xs), GFP_KERNEL); + if (unlikely(!xs)) { + err = -ENOMEM; + goto err_out; + } + + xs->xso.flags = sam->flags; + xs->id.spi = sam->spi; + xs->id.proto = sam->proto; + xs->props.family = sam->family; + if (xs->props.family == AF_INET6) + memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6)); + else + memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4)); + xs->xso.dev = adapter->netdev; + + algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1); + if (unlikely(!algo)) { + err = -ENOENT; + goto err_xs; + } + + aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8; + xs->aead = kzalloc(aead_len, GFP_KERNEL); + if (unlikely(!xs->aead)) { + err = -ENOMEM; + goto err_xs; + } + + xs->props.ealgo = algo->desc.sadb_alg_id; + xs->geniv = algo->uinfo.aead.geniv; + xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS; + xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS; + memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key)); + memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name)); + + /* set up the HW offload */ + err = ixgbe_ipsec_add_sa(xs); + if (err) + goto err_aead; + + pfsa = xs->xso.offload_handle; + if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { + sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; + ipsec->rx_tbl[sa_idx].vf = vf; + ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; + } else { + sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; + ipsec->tx_tbl[sa_idx].vf = vf; + ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; + } + + msgbuf[1] = xs->xso.offload_handle; + + return 0; + +err_aead: + memset(xs->aead, 0, sizeof(*xs->aead)); + kfree(xs->aead); +err_xs: + memset(xs, 0, sizeof(*xs)); + kfree(xs); +err_out: + msgbuf[1] = err; + return err; +} + +/** + * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete + * @adapter: board private structure + * @msgbuf: The message buffer + * @vf: the VF index + * + * Given the offload_handle sent by the VF, look for the related SA table + * entry and use its xs field to call for a delete of the SA. + * + * Note: We silently ignore requests to delete entries that are already + * set to unused because when a VF is set to "DOWN", the PF first + * gets a reset and clears all the VF's entries; then the VF's + * XFRM stack sends individual deletes for each entry, which the + * reset already removed. In the future it might be good to try to + * optimize this so not so many unnecessary delete messages are sent. 
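Both the add path above and the delete path below recover the SA table row from the PF's offload_handle by subtracting a table base index. A compact sketch of that decode; the base values here are assumptions for illustration only, the real IXGBE_IPSEC_BASE_*_INDEX constants live in ixgbe_ipsec.h:

/* Illustrative decode of a PF offload_handle into (table, row), as done in
 * ixgbe_ipsec_vf_add_sa() and ixgbe_ipsec_vf_del_sa().
 */
#define EX_BASE_RX_INDEX	0	/* assumed Rx table base */
#define EX_BASE_TX_INDEX	1024	/* assumed Tx table base (one Rx table size up) */

static u16 example_pfsa_to_idx(u32 pfsa, bool *is_rx)
{
	*is_rx = pfsa < EX_BASE_TX_INDEX;	/* low handles index the Rx table */
	return *is_rx ? pfsa - EX_BASE_RX_INDEX : pfsa - EX_BASE_TX_INDEX;
}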
+ * + * Returns 0 or error value + **/ +int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct xfrm_state *xs; + u32 pfsa = msgbuf[1]; + u16 sa_idx; + + if (!adapter->vfinfo[vf].trusted) { + e_err(drv, "vf %d attempted to delete an SA\n", vf); + return -EPERM; + } + + if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { + struct rx_sa *rsa; + + sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; + if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { + e_err(drv, "vf %d SA index %d out of range\n", + vf, sa_idx); + return -EINVAL; + } + + rsa = &ipsec->rx_tbl[sa_idx]; + + if (!rsa->used) + return 0; + + if (!(rsa->mode & IXGBE_RXTXMOD_VF) || + rsa->vf != vf) { + e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx); + return -ENOENT; + } + + xs = ipsec->rx_tbl[sa_idx].xs; + } else { + struct tx_sa *tsa; + + sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; + if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { + e_err(drv, "vf %d SA index %d out of range\n", + vf, sa_idx); + return -EINVAL; + } + + tsa = &ipsec->tx_tbl[sa_idx]; + + if (!tsa->used) + return 0; + + if (!(tsa->mode & IXGBE_RXTXMOD_VF) || + tsa->vf != vf) { + e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx); + return -ENOENT; + } + + xs = ipsec->tx_tbl[sa_idx].xs; + } + + ixgbe_ipsec_del_sa(xs); + + /* remove the xs that was made-up in the add request */ + memset(xs, 0, sizeof(*xs)); + kfree(xs); + + return 0; +} + +/** * ixgbe_ipsec_tx - setup Tx flags for ipsec offload * @tx_ring: outgoing context * @first: current data packet diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h index 9ef7faadda69..d2b64ff8eb4e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h @@ -26,6 +26,7 @@ enum ixgbe_ipsec_tbl_sel { #define IXGBE_RXMOD_PROTO_ESP 0x00000004 #define IXGBE_RXMOD_DECRYPT 0x00000008 #define IXGBE_RXMOD_IPV6 0x00000010 +#define IXGBE_RXTXMOD_VF 0x00000020 struct rx_sa { struct hlist_node hlist; @@ -37,6 +38,7 @@ struct rx_sa { u8 iptbl_ind; bool used; bool decrypt; + u32 vf; }; struct rx_ip_sa { @@ -49,8 +51,10 @@ struct tx_sa { struct xfrm_state *xs; u32 key[4]; u32 salt; + u32 mode; bool encrypt; bool used; + u32 vf; }; struct ixgbe_ipsec_tx_data { @@ -67,4 +71,13 @@ struct ixgbe_ipsec { struct tx_sa *tx_tbl; DECLARE_HASHTABLE(rx_sa_list, 10); }; + +struct sa_mbx_msg { + __be32 spi; + u8 flags; + u8 proto; + u16 family; + __be32 addr[4]; + u32 key[5]; +}; #endif /* _IXGBE_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index d361f570ca37..62e6499e4146 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1055,7 +1055,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) int txr_remaining = adapter->num_tx_queues; int xdp_remaining = adapter->num_xdp_queues; int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; - int err; + int err, i; /* only one q_vector if MSI-X is disabled. 
*/ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) @@ -1097,6 +1097,21 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) xdp_idx += xqpv; } + for (i = 0; i < adapter->num_rx_queues; i++) { + if (adapter->rx_ring[i]) + adapter->rx_ring[i]->ring_idx = i; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]) + adapter->tx_ring[i]->ring_idx = i; + } + + for (i = 0; i < adapter->num_xdp_queues; i++) { + if (adapter->xdp_ring[i]) + adapter->xdp_ring[i]->ring_idx = i; + } + return 0; err_out: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6cdd58d9d461..0049a2becd7e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -34,12 +34,14 @@ #include <net/tc_act/tc_mirred.h> #include <net/vxlan.h> #include <net/mpls.h> +#include <net/xdp_sock.h> #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" #include "ixgbe_model.h" +#include "ixgbe_txrx_common.h" char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -159,7 +161,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *ixgbe_wq; @@ -893,8 +895,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, } } -static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, - u64 qmask) +void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, + u64 qmask) { u32 mask; @@ -1673,9 +1675,9 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, * order to populate the hash, checksum, VLAN, timestamp, protocol, and * other fields within the skb. **/ -static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; u32 flags = rx_ring->q_vector->adapter->flags; @@ -1708,8 +1710,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, skb->protocol = eth_type_trans(skb, dev); } -static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb) +void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) { napi_gro_receive(&q_vector->napi, skb); } @@ -1868,9 +1870,9 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, * * Returns true if an error was encountered and skb was freed. 
**/ -static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { struct net_device *netdev = rx_ring->netdev; @@ -2186,14 +2188,6 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -#define IXGBE_XDP_PASS 0 -#define IXGBE_XDP_CONSUMED BIT(0) -#define IXGBE_XDP_TX BIT(1) -#define IXGBE_XDP_REDIR BIT(2) - -static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_frame *xdpf); - static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) @@ -3167,7 +3161,11 @@ int ixgbe_poll(struct napi_struct *napi, int budget) #endif ixgbe_for_each_ring(ring, q_vector->tx) { - if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) + bool wd = ring->xsk_umem ? + ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) : + ixgbe_clean_tx_irq(q_vector, ring, budget); + + if (!wd) clean_complete = false; } @@ -3183,7 +3181,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget) per_ring_budget = budget; ixgbe_for_each_ring(ring, q_vector->rx) { - int cleaned = ixgbe_clean_rx_irq(q_vector, ring, + int cleaned = ring->xsk_umem ? + ixgbe_clean_rx_irq_zc(q_vector, ring, + per_ring_budget) : + ixgbe_clean_rx_irq(q_vector, ring, per_ring_budget); work_done += cleaned; @@ -3475,6 +3476,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, u32 txdctl = IXGBE_TXDCTL_ENABLE; u8 reg_idx = ring->reg_idx; + ring->xsk_umem = NULL; + if (ring_is_xdp(ring)) + ring->xsk_umem = ixgbe_xsk_umem(adapter, ring); + /* disable queue to avoid issues while updating state */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); IXGBE_WRITE_FLUSH(hw); @@ -3579,12 +3584,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) else mtqc |= IXGBE_MTQC_64VF; } else { - if (tcs > 4) + if (tcs > 4) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - else if (tcs > 1) + } else if (tcs > 1) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; - else - mtqc = IXGBE_MTQC_64Q_1PB; + } else { + u8 max_txq = adapter->num_tx_queues + + adapter->num_xdp_queues; + if (max_txq > 63) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else + mtqc = IXGBE_MTQC_64Q_1PB; + } } IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); @@ -3707,10 +3718,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ - if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) + if (rx_ring->xsk_umem) { + u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + + /* If the MAC support setting RXDCTL.RLPML, the + * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and + * RXDCTL.RLPML is set to the actual UMEM buffer + * size. If not, then we are stuck with a 1k buffer + * size resolution. In this case frames larger than + * the UMEM buffer size viewed in a 1k resolution will + * be dropped. 
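A worked example for the buffer-sizing comment above, using assumed values (2048-byte UMEM chunks, XDP_PACKET_HEADROOM of 256 bytes, SRRCTL.BSIZEPKT in 1 KiB units), ahead of the check that follows:

/*
 * xsk_buf_len = chunk_size_nohr - XDP_PACKET_HEADROOM
 *             = 2048 - 256 = 1792 bytes
 *
 * MACs with RXDCTL.RLPML: SRRCTL.BSIZEPKT is set to PAGE_SIZE and RLPML
 * to 1792, so frames up to 1792 bytes are accepted.
 * 82599: SRRCTL.BSIZEPKT = 1792 >> 10 = 1 KiB, so frames larger than
 * 1024 bytes are dropped -- the "1k buffer size resolution" case.
 */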
+ */ + if (hw->mac.type != ixgbe_mac_82599EB) + srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else + } else { srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -4033,6 +4061,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_umem = ixgbe_xsk_umem(adapter, ring); + if (ring->xsk_umem) { + ring->zca.free = ixgbe_zca_free; + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_ZERO_COPY, + &ring->zca)); + + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + } + /* disable queue to avoid use of these values while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; @@ -4082,6 +4123,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, #endif } + if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) { + u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + + rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | + IXGBE_RXDCTL_RLPML_EN); + rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN; + + ring->rx_buf_len = xsk_buf_len; + } + /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, sizeof(struct ixgbe_rx_buffer) * ring->count); @@ -4095,7 +4147,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); - ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); + if (ring->xsk_umem) + ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring)); + else + ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) @@ -5175,6 +5230,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; + u64 action; spin_lock(&adapter->fdir_perfect_lock); @@ -5183,12 +5239,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { + action = filter->action; + if (action != IXGBE_FDIR_DROP_QUEUE && action != 0) + action = + (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1; + ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter, filter->sw_idx, - (filter->action == IXGBE_FDIR_DROP_QUEUE) ? + (action == IXGBE_FDIR_DROP_QUEUE) ? 
IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action]->reg_idx); + adapter->rx_ring[action]->reg_idx); } spin_unlock(&adapter->fdir_perfect_lock); @@ -5203,6 +5264,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) u16 i = rx_ring->next_to_clean; struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_ring->xsk_umem) { + ixgbe_xsk_clean_rx_ring(rx_ring); + goto skip_free; + } + /* Free all the Rx ring sk_buffs */ while (i != rx_ring->next_to_alloc) { if (rx_buffer->skb) { @@ -5241,6 +5307,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) } } +skip_free: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -5885,6 +5952,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) u16 i = tx_ring->next_to_clean; struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + if (tx_ring->xsk_umem) { + ixgbe_xsk_clean_tx_ring(tx_ring); + goto out; + } + while (i != tx_ring->next_to_use) { union ixgbe_adv_tx_desc *eop_desc, *tx_desc; @@ -5936,6 +6008,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) if (!ring_is_xdp(tx_ring)) netdev_tx_reset_queue(txring_txq(tx_ring)); +out: /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -6436,7 +6509,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); int ring_node = -1; - int size, err; + int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; @@ -6473,13 +6546,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->queue_index) < 0) goto err; - err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL); - if (err) { - xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - goto err; - } - rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -7777,6 +7843,33 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) } /** + * ixgbe_check_fw_error - Check firmware for errors + * @adapter: the adapter private structure + * + * Check firmware errors in register FWSM + */ +static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 fwsm; + + /* read fwsm.ext_err_ind register and log errors */ + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + + if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || + !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) + e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n", + fwsm); + + if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { + e_dev_err("Firmware recovery mode detected. Limiting functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); + return true; + } + + return false; +} + +/** * ixgbe_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data **/ @@ -7794,6 +7887,15 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } + if (ixgbe_check_fw_error(adapter)) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + unregister_netdev(adapter->netdev); + rtnl_unlock(); + } + ixgbe_service_event_complete(adapter); + return; + } if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; @@ -8068,9 +8170,6 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) return __ixgbe_maybe_stop_tx(tx_ring, size); } -#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ - IXGBE_TXD_CMD_RS) - static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, const u8 hdr_len) @@ -8423,8 +8522,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, } #endif -static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_frame *xdpf) +int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_frame *xdpf) { struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; struct ixgbe_tx_buffer *tx_buffer; @@ -8646,6 +8745,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; + if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) + return NETDEV_TX_BUSY; return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); } @@ -10157,12 +10258,19 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) xdp->prog_id = adapter->xdp_prog ? adapter->xdp_prog->aux->id : 0; return 0; + case XDP_QUERY_XSK_UMEM: + return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem, + xdp->xsk.queue_id); + case XDP_SETUP_XSK_UMEM: + return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem, + xdp->xsk.queue_id); + default: return -EINVAL; } } -static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) +void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) { /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. 
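The new XDP_QUERY_XSK_UMEM/XDP_SETUP_XSK_UMEM cases above are what a zero-copy AF_XDP socket bind ultimately exercises. A minimal userspace sketch, assuming libc headers that already define AF_XDP/SOL_XDP and the if_xdp.h UAPI of this kernel generation; error handling, the ring mmaps and all descriptor processing are omitted, and the interface name and queue id are placeholders:

#include <linux/if_xdp.h>
#include <net/if.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <unistd.h>

/* Register a UMEM and bind an AF_XDP socket in zero-copy mode to queue 0
 * of "eth0"; bind() is what reaches the driver's XDP_SETUP_XSK_UMEM handler.
 */
int example_xsk_bind(void)
{
	__u32 fq_size = 1024, cq_size = 1024, rx_size = 1024;
	struct xdp_umem_reg mr = {
		.len = 64 * 2048,	/* 64 chunks */
		.chunk_size = 2048,
		.headroom = 0,
	};
	struct sockaddr_xdp sxdp = {
		.sxdp_family = AF_XDP,
		.sxdp_flags = XDP_ZEROCOPY,
		.sxdp_ifindex = if_nametoindex("eth0"),
		.sxdp_queue_id = 0,
	};
	int fd = socket(AF_XDP, SOCK_RAW, 0);
	void *bufs;

	/* the UMEM area must be page aligned */
	posix_memalign(&bufs, getpagesize(), mr.len);
	mr.addr = (unsigned long)bufs;

	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size, sizeof(fq_size));
	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size, sizeof(cq_size));
	setsockopt(fd, SOL_XDP, XDP_RX_RING, &rx_size, sizeof(rx_size));

	return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}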
@@ -10192,6 +10300,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(!ring)) return -ENXIO; + if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) + return -ENXIO; + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; @@ -10253,8 +10364,162 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, + .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit, }; +static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = tx_ring->reg_idx; + int wait_loop; + u32 txdctl; + + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + + /* delay mechanism from ixgbe_disable_tx */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + + if (!(txdctl & IXGBE_TXDCTL_ENABLE)) + return; + } + + e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void ixgbe_disable_txr(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + set_bit(__IXGBE_TX_DISABLED, &tx_ring->state); + ixgbe_disable_txr_hw(adapter, tx_ring); +} + +static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = rx_ring->reg_idx; + int wait_loop; + u32 rxdctl; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + rxdctl |= IXGBE_RXDCTL_SWFLSH; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* delay mechanism from ixgbe_disable_rx */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + + if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) + return; + } + + e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring) +{ + memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); +} + +static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) +{ + memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); +} + +/** + * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function disables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. 
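One detail worth spelling out in the disable helpers above: with the sleep growing by two intervals per iteration, the polling loop's total wait adds up to the full completion timeout. A short worked sum, assuming IXGBE_MAX_RX_DESC_POLL is 10:

/*
 * delay_interval d = ixgbe_get_completion_timeout(adapter) / 100
 * sleeps: 1d + 3d + 5d + ... + 19d = 100 * d
 *       = the completion timeout
 *
 * i.e. the queue-disable poll gives the hardware roughly one full PCIe
 * completion timeout to clear TXDCTL.ENABLE/RXDCTL.ENABLE before the
 * driver logs an error.
 */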
+ **/ +void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) +{ + struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + ixgbe_disable_txr(adapter, tx_ring); + if (xdp_ring) + ixgbe_disable_txr(adapter, xdp_ring); + ixgbe_disable_rxr_hw(adapter, rx_ring); + + if (xdp_ring) + synchronize_sched(); + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + + ixgbe_clean_tx_ring(tx_ring); + if (xdp_ring) + ixgbe_clean_tx_ring(xdp_ring); + ixgbe_clean_rx_ring(rx_ring); + + ixgbe_reset_txr_stats(tx_ring); + if (xdp_ring) + ixgbe_reset_txr_stats(xdp_ring); + ixgbe_reset_rxr_stats(rx_ring); +} + +/** + * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function enables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. + **/ +void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) +{ + struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_enable(&rx_ring->q_vector->napi); + + ixgbe_configure_tx_ring(adapter, tx_ring); + if (xdp_ring) + ixgbe_configure_tx_ring(adapter, xdp_ring); + ixgbe_configure_rx_ring(adapter, rx_ring); + + clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); + clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); +} + /** * ixgbe_enumerate_functions - Get the number of ports this device has * @adapter: adapter structure @@ -10693,6 +10958,11 @@ skip_sriov: if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) netdev->features |= NETIF_F_LRO; + if (ixgbe_check_fw_error(adapter)) { + err = -EIO; + goto err_sw_init; + } + /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); @@ -11052,8 +11322,6 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, /* Free device reference count */ pci_dev_put(vfdev); } - - pci_cleanup_aer_uncorrect_error_status(pdev); } /* @@ -11103,7 +11371,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); pci_ers_result_t result; - int err; if (pci_enable_device_mem(pdev)) { e_err(probe, "Cannot re-enable PCI device after reset.\n"); @@ -11123,13 +11390,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_RECOVERED; } - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - e_dev_err("pci_cleanup_aer_uncorrect_error_status " - "failed 0x%0x\n", err); - /* non-fatal, continue */ - } - return result; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index e085b6520dac..a148534d7256 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -50,6 +50,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -80,6 +81,10 @@ enum ixgbe_pfvf_api_rev { #define 
IXGBE_VF_UPDATE_XCAST_MODE 0x0c +/* mailbox API, version 1.4 VF requests */ +#define IXGBE_VF_IPSEC_ADD 0x0d +#define IXGBE_VF_IPSEC_DEL 0x0e + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 3c6f01c41b78..af25a8fffeb8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -496,6 +496,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: /* Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are * disabled @@ -728,6 +729,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) /* reset multicast table array for vf */ adapter->vfinfo[vf].num_vf_mc_hashes = 0; + /* clear any ipsec table info */ + ixgbe_ipsec_vf_clear(adapter, vf); + /* Flush and reset the mta with the new values */ ixgbe_set_rx_mode(adapter->netdev); @@ -1000,6 +1004,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -1025,6 +1030,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: break; default: return -1; @@ -1065,6 +1071,7 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: break; @@ -1097,6 +1104,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: break; @@ -1122,8 +1130,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, /* promisc introduced in 1.3 version */ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; - /* Fall threw */ + /* Fall through */ case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: break; default: return -EOPNOTSUPP; @@ -1249,6 +1258,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_UPDATE_XCAST_MODE: retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); break; + case IXGBE_VF_IPSEC_ADD: + retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); + break; + case IXGBE_VF_IPSEC_DEL: + retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h new file mode 100644 index 000000000000..53d4089f5644 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Intel Corporation. 
*/ + +#ifndef _IXGBE_TXRX_COMMON_H_ +#define _IXGBE_TXRX_COMMON_H_ + +#define IXGBE_XDP_PASS 0 +#define IXGBE_XDP_CONSUMED BIT(0) +#define IXGBE_XDP_TX BIT(1) +#define IXGBE_XDP_REDIR BIT(2) + +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ + IXGBE_TXD_CMD_RS) + +int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_frame *xdpf); +bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); +void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring); +void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask); + +void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring); +void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring); + +struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring); +int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, + u16 qid); +int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, + u16 qid); + +void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); + +void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count); +int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + const int budget); +void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring); +bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *tx_ring, int napi_budget); +int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id); +void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring); + +#endif /* #define _IXGBE_TXRX_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 41bcbb337e83..84f2dba39e36 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -924,6 +924,9 @@ struct ixgbe_nvm_version { /* Firmware Semaphore Register */ #define IXGBE_FWSM_MODE_MASK 0xE #define IXGBE_FWSM_FW_MODE_PT 0x4 +#define IXGBE_FWSM_FW_NVM_RECOVERY_MODE BIT(5) +#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000 +#define IXGBE_FWSM_FW_VAL_BIT BIT(15) /* ARC Subsystem registers */ #define IXGBE_HICR 0x15F00 @@ -3461,6 +3464,7 @@ struct ixgbe_mac_operations { const char *); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + bool (*fw_recovery_mode)(struct ixgbe_hw *hw); void (*disable_rx)(struct ixgbe_hw *hw); void (*enable_rx)(struct ixgbe_hw *hw); void (*set_source_address_pruning)(struct ixgbe_hw *, bool, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index a8148c7126e5..10dbaf4f6e80 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1247,6 +1247,20 @@ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) return 0; } +/** + * ixgbe_fw_recovery_mode - Check FW NVM recovery mode + * @hw: pointer t hardware structure + * + * Returns true if in FW NVM recovery mode. 
+ */ +static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw) +{ + u32 fwsm; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE); +} + /** ixgbe_disable_rx_x550 - Disable RX unit * * Enables the Rx DMA unit for x550 @@ -3816,6 +3830,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ .init_thermal_sensor_thresh = NULL, \ + .fw_recovery_mode = &ixgbe_fw_recovery_mode_X550, \ .enable_rx = &ixgbe_enable_rx_generic, \ .disable_rx = &ixgbe_disable_rx_x550, \ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c new file mode 100644 index 000000000000..65c3e2c979d4 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -0,0 +1,801 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Intel Corporation. */ + +#include <linux/bpf_trace.h> +#include <net/xdp_sock.h> +#include <net/xdp.h> + +#include "ixgbe.h" +#include "ixgbe_txrx_common.h" + +struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + bool xdp_on = READ_ONCE(adapter->xdp_prog); + int qid = ring->ring_idx; + + if (!adapter->xsk_umems || !adapter->xsk_umems[qid] || + qid >= adapter->num_xsk_umems || !xdp_on) + return NULL; + + return adapter->xsk_umems[qid]; +} + +static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter) +{ + if (adapter->xsk_umems) + return 0; + + adapter->num_xsk_umems_used = 0; + adapter->num_xsk_umems = adapter->num_rx_queues; + adapter->xsk_umems = kcalloc(adapter->num_xsk_umems, + sizeof(*adapter->xsk_umems), + GFP_KERNEL); + if (!adapter->xsk_umems) { + adapter->num_xsk_umems = 0; + return -ENOMEM; + } + + return 0; +} + +static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter, + struct xdp_umem *umem, + u16 qid) +{ + int err; + + err = ixgbe_alloc_xsk_umems(adapter); + if (err) + return err; + + adapter->xsk_umems[qid] = umem; + adapter->num_xsk_umems_used++; + + return 0; +} + +static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid) +{ + adapter->xsk_umems[qid] = NULL; + adapter->num_xsk_umems_used--; + + if (adapter->num_xsk_umems == 0) { + kfree(adapter->xsk_umems); + adapter->xsk_umems = NULL; + adapter->num_xsk_umems = 0; + } +} + +static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter, + struct xdp_umem *umem) +{ + struct device *dev = &adapter->pdev->dev; + unsigned int i, j; + dma_addr_t dma; + + for (i = 0; i < umem->npgs; i++) { + dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); + if (dma_mapping_error(dev, dma)) + goto out_unmap; + + umem->pages[i].dma = dma; + } + + return 0; + +out_unmap: + for (j = 0; j < i; j++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); + umem->pages[i].dma = 0; + } + + return -1; +} + +static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter, + struct xdp_umem *umem) +{ + struct device *dev = &adapter->pdev->dev; + unsigned int i; + + for (i = 0; i < umem->npgs; i++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); + + umem->pages[i].dma = 0; + } +} + +static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, + struct xdp_umem *umem, + u16 qid) +{ + struct xdp_umem_fq_reuse *reuseq; + bool if_running; + int err; + + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + if (adapter->xsk_umems) { + 
if (qid >= adapter->num_xsk_umems) + return -EINVAL; + if (adapter->xsk_umems[qid]) + return -EBUSY; + } + + reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); + if (!reuseq) + return -ENOMEM; + + xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); + + err = ixgbe_xsk_umem_dma_map(adapter, umem); + if (err) + return err; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + ixgbe_txrx_ring_disable(adapter, qid); + + err = ixgbe_add_xsk_umem(adapter, umem, qid); + + if (if_running) + ixgbe_txrx_ring_enable(adapter, qid); + + return err; +} + +static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) +{ + bool if_running; + + if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems || + !adapter->xsk_umems[qid]) + return -EINVAL; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + ixgbe_txrx_ring_disable(adapter, qid); + + ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]); + ixgbe_remove_xsk_umem(adapter, qid); + + if (if_running) + ixgbe_txrx_ring_enable(adapter, qid); + + return 0; +} + +int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, + u16 qid) +{ + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + if (adapter->xsk_umems) { + if (qid >= adapter->num_xsk_umems) + return -EINVAL; + *umem = adapter->xsk_umems[qid]; + return 0; + } + + *umem = NULL; + return 0; +} + +int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, + u16 qid) +{ + return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) : + ixgbe_xsk_umem_disable(adapter, qid); +} + +static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + struct xdp_buff *xdp) +{ + int err, result = IXGBE_XDP_PASS; + struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); + xdp->handle += xdp->data - xdp->data_hard_start; + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) { + result = IXGBE_XDP_CONSUMED; + break; + } + result = ixgbe_xmit_xdp_ring(adapter, xdpf); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough */ + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + case XDP_DROP: + result = IXGBE_XDP_CONSUMED; + break; + } + rcu_read_unlock(); + return result; +} + +static struct +ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, + unsigned int size) +{ + struct ixgbe_rx_buffer *bi; + + bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + bi->dma, 0, + size, + DMA_BIDIRECTIONAL); + + return bi; +} + +static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *obi) +{ + unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; + u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + u16 nta = rx_ring->next_to_alloc; + struct ixgbe_rx_buffer *nbi; + + nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc]; + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + /* transfer page from old buffer to new buffer */ + nbi->dma = obi->dma & mask; + nbi->dma += hr; + + nbi->addr = (void *)((unsigned long)obi->addr & mask); + nbi->addr += hr; + + nbi->handle = obi->handle & mask; + nbi->handle += rx_ring->xsk_umem->headroom; + + obi->addr = NULL; + obi->skb = NULL; +} + +void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) +{ + struct ixgbe_rx_buffer *bi; + struct ixgbe_ring *rx_ring; + u64 hr, mask; + u16 nta; + + rx_ring = container_of(alloc, struct ixgbe_ring, zca); + hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + mask = rx_ring->xsk_umem->chunk_mask; + + nta = rx_ring->next_to_alloc; + bi = rx_ring->rx_buffer_info; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + handle &= mask; + + bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); + bi->addr += hr; + + bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; +} + +static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + void *addr = bi->addr; + u64 handle, hr; + + if (addr) + return true; + + if (!xsk_umem_peek_addr(umem, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr(umem); + return true; +} + +static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + u64 handle, hr; + + if (!xsk_umem_peek_addr_rq(umem, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + handle &= rx_ring->xsk_umem->chunk_mask; + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr_rq(umem); + return true; +} + +static __always_inline bool +__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count, + bool alloc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi)) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + bool ok = true; + + /* nothing to do */ + if (!cleaned_count) + return true; + + rx_desc = IXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!alloc(rx_ring, bi)) { + ok = false; + break; + } + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + rx_ring->rx_buf_len, + DMA_BIDIRECTIONAL); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } + + return ok; +} + +void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count) +{ + __ixgbe_alloc_rx_buffers_zc(rx_ring, count, + ixgbe_alloc_buffer_slow_zc); +} + +static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring, + u16 count) +{ + return __ixgbe_alloc_rx_buffers_zc(rx_ring, count, + ixgbe_alloc_buffer_zc); +} + +static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + return skb; +} + +static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(IXGBE_RX_DESC(rx_ring, ntc)); +} + +int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + const int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct ixgbe_adapter *adapter = q_vector->adapter; + u16 cleaned_count = ixgbe_desc_unused(rx_ring); + unsigned int xdp_res, xdp_xmit = 0; + bool failure = false; + struct sk_buff *skb; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; + + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + failure = failure || + !ixgbe_alloc_rx_buffers_fast_zc(rx_ring, + cleaned_count); + cleaned_count = 0; + } + + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = ixgbe_get_rx_buffer_zc(rx_ring, size); + + if (unlikely(!ixgbe_test_staterr(rx_desc, + IXGBE_RXD_STAT_EOP))) { + struct ixgbe_rx_buffer *next_bi; + + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + ixgbe_inc_ntc(rx_ring); + next_bi = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + next_bi->skb = ERR_PTR(-EINVAL); + continue; + } + + if (unlikely(bi->skb)) { + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + ixgbe_inc_ntc(rx_ring); + continue; + } + + xdp.data = bi->addr; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + xdp.handle = bi->handle; + + xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp); + + if (xdp_res) { + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { + xdp_xmit |= xdp_res; + bi->addr = NULL; + bi->skb = NULL; + } else { + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + } + total_rx_packets++; + total_rx_bytes += size; + + cleaned_count++; + ixgbe_inc_ntc(rx_ring); + continue; + } + + /* XDP_PASS path */ + skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp); + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + break; + } + + cleaned_count++; + ixgbe_inc_ntc(rx_ring); + + if (eth_skb_pad(skb)) + continue; + + total_rx_bytes += skb->len; + total_rx_packets++; + + ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + ixgbe_rx_skb(q_vector, skb); + } + + if (xdp_xmit & IXGBE_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & IXGBE_XDP_TX) { + struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); + } + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return failure ? 
budget : (int)total_rx_packets; +} + +void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i]; + + while (i != rx_ring->next_to_alloc) { + xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle); + i++; + bi++; + if (i == rx_ring->count) { + i = 0; + bi = rx_ring->rx_buffer_info; + } + } +} + +static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) +{ + union ixgbe_adv_tx_desc *tx_desc = NULL; + struct ixgbe_tx_buffer *tx_bi; + bool work_done = true; + u32 len, cmd_type; + dma_addr_t dma; + + while (budget-- > 0) { + if (unlikely(!ixgbe_desc_unused(xdp_ring))) { + work_done = false; + break; + } + + if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len)) + break; + + dma_sync_single_for_device(xdp_ring->dev, dma, len, + DMA_BIDIRECTIONAL); + + tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; + tx_bi->bytecount = len; + tx_bi->xdpf = NULL; + + tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; + cmd_type |= len | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); + + xdp_ring->next_to_use++; + if (xdp_ring->next_to_use == xdp_ring->count) + xdp_ring->next_to_use = 0; + } + + if (tx_desc) { + ixgbe_xdp_ring_update_tail(xdp_ring); + xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + } + + return !!budget && work_done; +} + +static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *tx_bi) +{ + xdp_return_frame(tx_bi->xdpf); + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_bi, dma), + dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_bi, len, 0); +} + +bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *tx_ring, int napi_budget) +{ + unsigned int total_packets = 0, total_bytes = 0; + u32 i = tx_ring->next_to_clean, xsk_frames = 0; + unsigned int budget = q_vector->tx.work_limit; + struct xdp_umem *umem = tx_ring->xsk_umem; + union ixgbe_adv_tx_desc *tx_desc; + struct ixgbe_tx_buffer *tx_bi; + bool xmit_done; + + tx_bi = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) + break; + + total_bytes += tx_bi->bytecount; + total_packets += tx_bi->gso_segs; + + if (tx_bi->xdpf) + ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + total_bytes += tx_bi->bytecount; + + tx_bi++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_bi = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (xsk_frames) + xsk_umem_complete_tx(umem, xsk_frames); + + xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); + return budget > 0 && xmit_done; +} + +int ixgbe_xsk_async_xmit(struct net_device *dev, u32 
qid) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!READ_ONCE(adapter->xdp_prog)) + return -ENXIO; + + if (qid >= adapter->num_xdp_queues) + return -ENXIO; + + if (!adapter->xsk_umems || !adapter->xsk_umems[qid]) + return -ENXIO; + + ring = adapter->xdp_ring[qid]; + if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { + u64 eics = BIT_ULL(ring->q_vector->v_idx); + + ixgbe_irq_rearm_queues(adapter, eics); + } + + return 0; +} + +void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring) +{ + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; + struct xdp_umem *umem = tx_ring->xsk_umem; + struct ixgbe_tx_buffer *tx_bi; + u32 xsk_frames = 0; + + while (ntc != ntu) { + tx_bi = &tx_ring->tx_buffer_info[ntc]; + + if (tx_bi->xdpf) + ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + + ntc++; + if (ntc == tx_ring->count) + ntc = 0; + } + + if (xsk_frames) + xsk_umem_complete_tx(umem, xsk_frames); +} diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index aba1e6a37a6a..297d0f0858b5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -10,4 +10,5 @@ ixgbevf-objs := vf.o \ mbx.o \ ethtool.o \ ixgbevf_main.o +ixgbevf-$(CONFIG_XFRM_OFFLOAD) += ipsec.o diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 700d8eb2f6f8..6bace746eaac 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -133,9 +133,14 @@ typedef u32 ixgbe_link_speed; #define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ #define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */ #define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F #define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ #define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 #define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 #define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 @@ -229,7 +234,7 @@ union ixgbe_adv_rx_desc { /* Context descriptors */ struct ixgbe_adv_tx_context_desc { __le32 vlan_macip_lens; - __le32 seqnum_seed; + __le32 fceof_saidx; __le32 type_tucmd_mlhl; __le32 mss_l4len_idx; }; @@ -250,9 +255,12 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */ #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 631c91046f39..5399787e07af 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -55,6 +55,8 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = { IXGBEVF_STAT("alloc_rx_page", alloc_rx_page), IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed), IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + IXGBEVF_STAT("tx_ipsec", tx_ipsec), + IXGBEVF_STAT("rx_ipsec", rx_ipsec), }; #define IXGBEVF_QUEUE_STATS_LEN ( \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c new file mode 100644 index 000000000000..e8a3231be0bf --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -0,0 +1,670 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. 
*/ + +#include "ixgbevf.h" +#include <net/xfrm.h> +#include <crypto/aead.h> + +#define IXGBE_IPSEC_KEY_BITS 160 +static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + +/** + * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA + * @adapter: board private structure + * @xs: xfrm info to be sent to the PF + * + * Returns: positive offload handle from the PF, or negative error code + **/ +static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, + struct xfrm_state *xs) +{ + u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 }; + struct ixgbe_hw *hw = &adapter->hw; + struct sa_mbx_msg *sam; + int ret; + + /* send the important bits to the PF */ + sam = (struct sa_mbx_msg *)(&msgbuf[1]); + sam->flags = xs->xso.flags; + sam->spi = xs->id.spi; + sam->proto = xs->id.proto; + sam->family = xs->props.family; + + if (xs->props.family == AF_INET6) + memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6)); + else + memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4)); + memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key)); + + msgbuf[0] = IXGBE_VF_IPSEC_ADD; + + spin_lock_bh(&adapter->mbx_lock); + + ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); + if (ret) + goto out; + + ret = hw->mbx.ops.read_posted(hw, msgbuf, 2); + if (ret) + goto out; + + ret = (int)msgbuf[1]; + if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0) + ret = -1; + +out: + spin_unlock_bh(&adapter->mbx_lock); + + return ret; +} + +/** + * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA + * @adapter: board private structure + * @pfsa: sa index returned from PF when created, -1 for all + * + * Returns: 0 on success, or negative error code + **/ +static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 msgbuf[2]; + int err; + + memset(msgbuf, 0, sizeof(msgbuf)); + msgbuf[0] = IXGBE_VF_IPSEC_DEL; + msgbuf[1] = (u32)pfsa; + + spin_lock_bh(&adapter->mbx_lock); + + err = hw->mbx.ops.write_posted(hw, msgbuf, 2); + if (err) + goto out; + + err = hw->mbx.ops.read_posted(hw, msgbuf, 2); + if (err) + goto out; + +out: + spin_unlock_bh(&adapter->mbx_lock); + return err; +} + +/** + * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset + * @adapter: board private structure + * + * Reload the HW tables from the SW tables after they've been bashed + * by a chip reset. While we're here, make sure any stale VF data is + * removed, since we go through reset when num_vfs changes. 
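The request helpers above pack struct sa_mbx_msg into the mailbox words following the opcode and read the PF's verdict back from word 1. A small sanity-check sketch, assuming the usual 16-word IXGBE_VFMAILBOX_SIZE; the function is illustrative only:

/* The IPSEC_ADD payload must leave room for the opcode word: the 44 bytes
 * of struct sa_mbx_msg have to fit in the remaining 15 mailbox words.
 */
static void example_check_sa_mbx_msg_fits(void)
{
	BUILD_BUG_ON(sizeof(struct sa_mbx_msg) >
		     (IXGBE_VFMAILBOX_SIZE - 1) * sizeof(u32));
}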
+ **/ +void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *netdev = adapter->netdev; + int i; + + if (!(adapter->netdev->features & NETIF_F_HW_ESP)) + return; + + /* reload the Rx and Tx keys */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + struct rx_sa *r = &ipsec->rx_tbl[i]; + struct tx_sa *t = &ipsec->tx_tbl[i]; + int ret; + + if (r->used) { + ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs); + if (ret < 0) + netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n", + i, ret); + } + + if (t->used) { + ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs); + if (ret < 0) + netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n", + i, ret); + } + } +} + +/** + * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index + * @ipsec: pointer to IPsec struct + * @rxtable: true if we need to look in the Rx table + * + * Returns the first unused index in either the Rx or Tx SA table + **/ +static +int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable) +{ + u32 i; + + if (rxtable) { + if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search rx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->rx_tbl[i].used) + return i; + } + } else { + if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search tx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->tx_tbl[i].used) + return i; + } + } + + return -ENOSPC; +} + +/** + * ixgbevf_ipsec_find_rx_state - find the state that matches + * @ipsec: pointer to IPsec struct + * @daddr: inbound address to match + * @proto: protocol to match + * @spi: SPI to match + * @ip4: true if using an IPv4 address + * + * Returns a pointer to the matching SA state information + **/ +static +struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, + __be32 *daddr, u8 proto, + __be32 spi, bool ip4) +{ + struct xfrm_state *ret = NULL; + struct rx_sa *rsa; + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, + (__force u32)spi) { + if (spi == rsa->xs->id.spi && + ((ip4 && *daddr == rsa->xs->id.daddr.a4) || + (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, + sizeof(rsa->xs->id.daddr.a6)))) && + proto == rsa->xs->id.proto) { + ret = rsa->xs; + xfrm_state_hold(ret); + break; + } + } + rcu_read_unlock(); + return ret; +} + +/** + * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol + * @xs: pointer to xfrm_state struct + * @mykey: pointer to key array to populate + * @mysalt: pointer to salt value to populate + * + * This copies the protocol keys and salt to our own data tables. The + * 82599 family only supports the one algorithm. 
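The key parser described here splits the rfc4106(gcm(aes)) key material into a 16-byte AES key and a 4-byte salt. The sketch below reproduces that split on a flat buffer; the function name, the blob length used to exercise the long-key branch, and the fill values are purely illustrative, while the 160-bit threshold mirrors the driver's constant.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Split GCM key material the way the parser below does: the first 16 bytes
 * are the AES key; if the blob is longer than 160 bits the 32-bit salt is
 * taken from byte offset 16, otherwise the salt is left at zero.
 */
static int parse_gcm_key(const uint8_t *key_data, int key_len_bits,
			 uint32_t mykey[4], uint32_t *mysalt)
{
	if (key_len_bits > 160)
		memcpy(mysalt, key_data + 16, sizeof(*mysalt));
	else if (key_len_bits == 160)
		*mysalt = 0;
	else
		return -1;	/* shorter keys are rejected */

	memcpy(mykey, key_data, 16);
	return 0;
}

int main(void)
{
	uint8_t blob[24];	/* enough bytes to exercise the long-key branch */
	uint32_t key[4], salt;

	memset(blob, 0xab, 16);		/* stand-in AES key */
	memset(blob + 16, 0xcd, 8);	/* stand-in salt bytes */

	if (!parse_gcm_key(blob, 192, key, &salt))
		printf("salt = 0x%08x\n", salt);
	return 0;
}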
+ **/ +static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, + u32 *mykey, u32 *mysalt) +{ + struct net_device *dev = xs->xso.dev; + unsigned char *key_data; + char *alg_name = NULL; + int key_len; + + if (!xs->aead) { + netdev_err(dev, "Unsupported IPsec algorithm\n"); + return -EINVAL; + } + + if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) { + netdev_err(dev, "IPsec offload requires %d bit authentication\n", + IXGBE_IPSEC_AUTH_BITS); + return -EINVAL; + } + + key_data = &xs->aead->alg_key[0]; + key_len = xs->aead->alg_key_len; + alg_name = xs->aead->alg_name; + + if (strcmp(alg_name, aes_gcm_name)) { + netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", + aes_gcm_name); + return -EINVAL; + } + + /* The key bytes come down in a big endian array of bytes, so + * we don't need to do any byte swapping. + * 160 accounts for 16 byte key and 4 byte salt + */ + if (key_len > IXGBE_IPSEC_KEY_BITS) { + *mysalt = ((u32 *)key_data)[4]; + } else if (key_len == IXGBE_IPSEC_KEY_BITS) { + *mysalt = 0; + } else { + netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); + return -EINVAL; + } + memcpy(mykey, key_data, 16); + + return 0; +} + +/** + * ixgbevf_ipsec_add_sa - program device with a security association + * @xs: pointer to transformer state struct + **/ +static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbevf_adapter *adapter = netdev_priv(dev); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + u16 sa_idx; + int ret; + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n", + xs->id.proto); + return -EINVAL; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + struct rx_sa rsa; + + if (xs->calg) { + netdev_err(dev, "Compression offload not supported\n"); + return -EINVAL; + } + + /* find the first unused index */ + ret = ixgbevf_ipsec_find_empty_idx(ipsec, true); + if (ret < 0) { + netdev_err(dev, "No space for SA in Rx table!\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&rsa, 0, sizeof(rsa)); + rsa.used = true; + rsa.xs = xs; + + if (rsa.xs->id.proto & IPPROTO_ESP) + rsa.decrypt = xs->ealg || xs->aead; + + /* get the key and salt */ + ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for Rx SA table\n"); + return ret; + } + + /* get ip for rx sa table */ + if (xs->props.family == AF_INET6) + memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4); + + rsa.mode = IXGBE_RXMOD_VALID; + if (rsa.xs->id.proto & IPPROTO_ESP) + rsa.mode |= IXGBE_RXMOD_PROTO_ESP; + if (rsa.decrypt) + rsa.mode |= IXGBE_RXMOD_DECRYPT; + if (rsa.xs->props.family == AF_INET6) + rsa.mode |= IXGBE_RXMOD_IPV6; + + ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); + if (ret < 0) + return ret; + rsa.pfsa = ret; + + /* the preparations worked, so save the info */ + memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa)); + + xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX; + + ipsec->num_rx_sa++; + + /* hash the new entry for faster search in Rx path */ + hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, + (__force u32)rsa.xs->id.spi); + } else { + struct tx_sa tsa; + + /* find the first unused index */ + ret = ixgbevf_ipsec_find_empty_idx(ipsec, false); + if (ret < 0) { + netdev_err(dev, "No space for SA in Tx table\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&tsa, 0, sizeof(tsa)); + 
tsa.used = true; + tsa.xs = xs; + + if (xs->id.proto & IPPROTO_ESP) + tsa.encrypt = xs->ealg || xs->aead; + + ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for Tx SA table\n"); + memset(&tsa, 0, sizeof(tsa)); + return ret; + } + + ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); + if (ret < 0) + return ret; + tsa.pfsa = ret; + + /* the preparations worked, so save the info */ + memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa)); + + xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX; + + ipsec->num_tx_sa++; + } + + return 0; +} + +/** + * ixgbevf_ipsec_del_sa - clear out this specific SA + * @xs: pointer to transformer state struct + **/ +static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbevf_adapter *adapter = netdev_priv(dev); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + u16 sa_idx; + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; + + if (!ipsec->rx_tbl[sa_idx].used) { + netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n", + sa_idx, xs->xso.offload_handle); + return; + } + + ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa); + hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist); + memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa)); + ipsec->num_rx_sa--; + } else { + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; + + if (!ipsec->tx_tbl[sa_idx].used) { + netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n", + sa_idx, xs->xso.offload_handle); + return; + } + + ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa); + memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa)); + ipsec->num_tx_sa--; + } +} + +/** + * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload + * @skb: current data packet + * @xs: pointer to transformer state struct + **/ +static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + if (xs->props.family == AF_INET) { + /* Offload with IPv4 options is not supported yet */ + if (ip_hdr(skb)->ihl != 5) + return false; + } else { + /* Offload with IPv6 extension headers is not support yet */ + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + + return true; +} + +static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = { + .xdo_dev_state_add = ixgbevf_ipsec_add_sa, + .xdo_dev_state_delete = ixgbevf_ipsec_del_sa, + .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok, +}; + +/** + * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload + * @tx_ring: outgoing context + * @first: current data packet + * @itd: ipsec Tx data for later use in building context descriptor + **/ +int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd) +{ + struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct xfrm_state *xs; + struct tx_sa *tsa; + u16 sa_idx; + + if (unlikely(!first->skb->sp->len)) { + netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", + __func__, first->skb->sp->len); + return 0; + } + + xs = xfrm_input_state(first->skb); + if (unlikely(!xs)) { + netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n", + __func__, xs); + return 0; + } + + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; + if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { + netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", + __func__, 
sa_idx, xs->xso.offload_handle); + return 0; + } + + tsa = &ipsec->tx_tbl[sa_idx]; + if (unlikely(!tsa->used)) { + netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n", + __func__, sa_idx); + return 0; + } + + itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX; + + first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM; + + if (xs->id.proto == IPPROTO_ESP) { + itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | + IXGBE_ADVTXD_TUCMD_L4T_TCP; + if (first->protocol == htons(ETH_P_IP)) + itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; + + /* The actual trailer length is authlen (16 bytes) plus + * 2 bytes for the proto and the padlen values, plus + * padlen bytes of padding. This ends up not the same + * as the static value found in xs->props.trailer_len (21). + * + * ... but if we're doing GSO, don't bother as the stack + * doesn't add a trailer for those. + */ + if (!skb_is_gso(first->skb)) { + /* The "correct" way to get the auth length would be + * to use + * authlen = crypto_aead_authsize(xs->data); + * but since we know we only have one size to worry + * about * we can let the compiler use the constant + * and save us a few CPU cycles. + */ + const int authlen = IXGBE_IPSEC_AUTH_BITS / 8; + struct sk_buff *skb = first->skb; + u8 padlen; + int ret; + + ret = skb_copy_bits(skb, skb->len - (authlen + 2), + &padlen, 1); + if (unlikely(ret)) + return 0; + itd->trailer_len = authlen + 2 + padlen; + } + } + if (tsa->encrypt) + itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; + + return 1; +} + +/** + * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor + * @rx_ring: receiving ring + * @rx_desc: receive data descriptor + * @skb: current data packet + * + * Determine if there was an IPsec encapsulation noticed, and if so set up + * the resulting status for later in the receive stack. + **/ +void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev); + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | + IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct xfrm_offload *xo = NULL; + struct xfrm_state *xs = NULL; + struct ipv6hdr *ip6 = NULL; + struct iphdr *ip4 = NULL; + void *daddr; + __be32 spi; + u8 *c_hdr; + u8 proto; + + /* Find the IP and crypto headers in the data. + * We can assume no VLAN header in the way, b/c the + * hw won't recognize the IPsec packet and anyway the + * currently VLAN device doesn't support xfrm offload. 
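The ESP trailer arithmetic in ixgbevf_ipsec_tx() above is the ICV length plus the two pad-length/next-header bytes plus the padding itself, with the pad-length byte read back from near the end of the frame. The sketch below performs the same arithmetic on a flat buffer; it assumes the 16-byte ICV of the one supported AES-GCM mode, and the payload and padding sizes are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Compute the ESP trailer length the way the Tx path does: the ICV plus
 * the pad-length and next-header bytes plus the padding itself.  The
 * pad-length byte sits just before the next-header byte, which in turn
 * precedes the ICV.
 */
static int esp_trailer_len(const uint8_t *pkt, int len, int authlen)
{
	uint8_t padlen = pkt[len - authlen - 2];

	return authlen + 2 + padlen;
}

int main(void)
{
	/* toy frame: 40 payload bytes, 2 pad bytes, pad-len/next-hdr, 16-byte ICV */
	uint8_t pkt[40 + 2 + 2 + 16] = { 0 };
	int len = (int)sizeof(pkt);

	pkt[len - 16 - 2] = 2;	/* pad length */
	printf("trailer = %d bytes\n", esp_trailer_len(pkt, len, 16));
	return 0;
}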
+ */ + if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { + ip4 = (struct iphdr *)(skb->data + ETH_HLEN); + daddr = &ip4->daddr; + c_hdr = (u8 *)ip4 + ip4->ihl * 4; + } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { + ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); + daddr = &ip6->daddr; + c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); + } else { + return; + } + + switch (pkt_info & ipsec_pkt_types) { + case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): + spi = ((struct ip_auth_hdr *)c_hdr)->spi; + proto = IPPROTO_AH; + break; + case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): + spi = ((struct ip_esp_hdr *)c_hdr)->spi; + proto = IPPROTO_ESP; + break; + default: + return; + } + + xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); + if (unlikely(!xs)) + return; + + skb->sp = secpath_dup(skb->sp); + if (unlikely(!skb->sp)) + return; + + skb->sp->xvec[skb->sp->len++] = xs; + skb->sp->olen++; + xo = xfrm_offload(skb); + xo->flags = CRYPTO_DONE; + xo->status = CRYPTO_SUCCESS; + + adapter->rx_ipsec++; +} + +/** + * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation + * @adapter: board private structure + **/ +void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ipsec *ipsec; + size_t size; + + switch (adapter->hw.api_version) { + case ixgbe_mbox_api_14: + break; + default: + return; + } + + ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); + if (!ipsec) + goto err1; + hash_init(ipsec->rx_sa_list); + + size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; + ipsec->rx_tbl = kzalloc(size, GFP_KERNEL); + if (!ipsec->rx_tbl) + goto err2; + + size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; + ipsec->tx_tbl = kzalloc(size, GFP_KERNEL); + if (!ipsec->tx_tbl) + goto err2; + + ipsec->num_rx_sa = 0; + ipsec->num_tx_sa = 0; + + adapter->ipsec = ipsec; + + adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops; + +#define IXGBEVF_ESP_FEATURES (NETIF_F_HW_ESP | \ + NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) + + adapter->netdev->features |= IXGBEVF_ESP_FEATURES; + adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES; + + return; + +err2: + kfree(ipsec->rx_tbl); + kfree(ipsec->tx_tbl); + kfree(ipsec); +err1: + netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); +} + +/** + * ixgbevf_stop_ipsec_offload - tear down the IPsec offload + * @adapter: board private structure + **/ +void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + + adapter->ipsec = NULL; + if (ipsec) { + kfree(ipsec->rx_tbl); + kfree(ipsec->tx_tbl); + kfree(ipsec); + } +} diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h new file mode 100644 index 000000000000..3740725041c3 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. 
*/ + +#ifndef _IXGBEVF_IPSEC_H_ +#define _IXGBEVF_IPSEC_H_ + +#define IXGBE_IPSEC_MAX_SA_COUNT 1024 +#define IXGBE_IPSEC_BASE_RX_INDEX 0 +#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT +#define IXGBE_IPSEC_AUTH_BITS 128 + +#define IXGBE_RXMOD_VALID 0x00000001 +#define IXGBE_RXMOD_PROTO_ESP 0x00000004 +#define IXGBE_RXMOD_DECRYPT 0x00000008 +#define IXGBE_RXMOD_IPV6 0x00000010 + +struct rx_sa { + struct hlist_node hlist; + struct xfrm_state *xs; + __be32 ipaddr[4]; + u32 key[4]; + u32 salt; + u32 mode; + u32 pfsa; + bool used; + bool decrypt; +}; + +struct rx_ip_sa { + __be32 ipaddr[4]; + u32 ref_cnt; + bool used; +}; + +struct tx_sa { + struct xfrm_state *xs; + u32 key[4]; + u32 salt; + u32 pfsa; + bool encrypt; + bool used; +}; + +struct ixgbevf_ipsec_tx_data { + u32 flags; + u16 trailer_len; + u16 pfsa; +}; + +struct ixgbevf_ipsec { + u16 num_rx_sa; + u16 num_tx_sa; + struct rx_sa *rx_tbl; + struct tx_sa *tx_tbl; + DECLARE_HASHTABLE(rx_sa_list, 10); +}; + +struct sa_mbx_msg { + __be32 spi; + u8 flags; + u8 proto; + u16 family; + __be32 addr[4]; + u32 key[5]; +}; +#endif /* _IXGBEVF_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 56a1031dcc07..e399e1c0c54a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -14,6 +14,7 @@ #include <net/xdp.h> #include "vf.h" +#include "ipsec.h" #define IXGBE_MAX_TXD_PWR 14 #define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR) @@ -163,6 +164,7 @@ struct ixgbevf_ring { #define IXGBE_TX_FLAGS_VLAN BIT(1) #define IXGBE_TX_FLAGS_TSO BIT(2) #define IXGBE_TX_FLAGS_IPV4 BIT(3) +#define IXGBE_TX_FLAGS_IPSEC BIT(4) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 @@ -338,6 +340,7 @@ struct ixgbevf_adapter { struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */ u64 restart_queue; u32 tx_timeout_count; + u64 tx_ipsec; /* RX */ int num_rx_queues; @@ -348,6 +351,7 @@ struct ixgbevf_adapter { u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; u64 alloc_rx_page; + u64 rx_ipsec; struct msix_entry *msix_entries; @@ -384,6 +388,10 @@ struct ixgbevf_adapter { u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; u32 flags; #define IXGBEVF_FLAGS_LEGACY_RX BIT(1) + +#ifdef CONFIG_XFRM + struct ixgbevf_ipsec *ipsec; +#endif /* CONFIG_XFRM */ }; enum ixbgevf_state_t { @@ -451,6 +459,31 @@ int ethtool_ioctl(struct ifreq *ifr); extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); +#ifdef CONFIG_XFRM_OFFLOAD +void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter); +void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter); +void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter); +void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd); +#else +static inline void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) +{ } +static inline void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter) +{ } +static inline void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { } +static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { } +static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + struct 
ixgbevf_ipsec_tx_data *itd) +{ return 0; } +#endif /* CONFIG_XFRM_OFFLOAD */ + void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 5a228582423b..98707ee11d72 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -40,7 +40,7 @@ static const char ixgbevf_driver_string[] = #define DRV_VERSION "4.1.0-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2015 Intel Corporation."; + "Copyright (c) 2009 - 2018 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -79,7 +79,7 @@ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@ -268,7 +268,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0; + unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; unsigned int budget = tx_ring->count / 2; unsigned int i = tx_ring->next_to_clean; @@ -299,6 +299,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; + if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) + total_ipsec++; /* free the skb */ if (ring_is_xdp(tx_ring)) @@ -361,6 +363,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; + adapter->tx_ipsec += total_ipsec; if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { struct ixgbe_hw *hw = &adapter->hw; @@ -516,6 +519,9 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) + ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); } @@ -1012,7 +1018,7 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); context_desc->vlan_macip_lens = cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT); @@ -2200,6 +2206,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter) ixgbevf_set_rx_mode(adapter->netdev); ixgbevf_restore_vlan(adapter); + ixgbevf_ipsec_restore(adapter); ixgbevf_configure_tx(adapter); ixgbevf_configure_rx(adapter); @@ -2246,7 +2253,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_13, + int api[] = { ixgbe_mbox_api_14, + ixgbe_mbox_api_13, ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, @@ 
-2605,6 +2613,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: if (adapter->xdp_prog && hw->mac.max_tx_queues == rss) rss = rss > 3 ? 2 : 1; @@ -3700,8 +3709,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, - u32 vlan_macip_lens, u32 type_tucmd, - u32 mss_l4len_idx) + u32 vlan_macip_lens, u32 fceof_saidx, + u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -3715,14 +3724,15 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, - u8 *hdr_len) + u8 *hdr_len, + struct ixgbevf_ipsec_tx_data *itd) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; @@ -3736,6 +3746,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, unsigned char *hdr; } l4; u32 paylen, l4_offset; + u32 fceof_saidx = 0; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3761,13 +3772,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + int len = csum_start - trans_start; /* IP header will have to cancel out any data that - * is not a part of the outer IP header + * is not a part of the outer IP header, so set to + * a reverse csum if needed, else init check to 0. */ - ip.v4->check = csum_fold(csum_partial(trans_start, - csum_start - trans_start, - 0)); + ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? 
+ csum_fold(csum_partial(trans_start, + len, 0)) : 0; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; @@ -3799,13 +3812,16 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); + fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, + mss_l4len_idx); return 1; } @@ -3820,10 +3836,12 @@ static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) } static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *first) + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; + u32 fceof_saidx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3849,6 +3867,10 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, skb_checksum_help(skb); goto no_csum; } + + if (first->protocol == htons(ETH_P_IP)) + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - @@ -3858,7 +3880,11 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + fceof_saidx, type_tucmd, 0); } static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) @@ -3892,8 +3918,12 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); - /* use index 1 context for TSO/FSO/FCOE */ - if (tx_flags & IXGBE_TX_FLAGS_TSO) + /* enable IPsec */ + if (tx_flags & IXGBE_TX_FLAGS_IPSEC) + olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC); + + /* use index 1 context for TSO/FSO/FCOE/IPSEC */ + if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC)) olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT); /* Check Context must be set if Tx switch is enabled, which it @@ -4075,6 +4105,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, int tso; u32 tx_flags = 0; u16 count = TXD_USE_COUNT(skb_headlen(skb)); + struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 }; #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD unsigned short f; #endif @@ -4119,11 +4150,15 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = vlan_get_protocol(skb); - tso = ixgbevf_tso(tx_ring, first, &hdr_len); +#ifdef CONFIG_XFRM_OFFLOAD + if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) + goto out_drop; +#endif + tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); if (tso < 0) goto out_drop; else if (!tso) - ixgbevf_tx_csum(tx_ring, first); + ixgbevf_tx_csum(tx_ring, first, &ipsec_tx); ixgbevf_tx_map(tx_ring, first, hdr_len); @@ -4613,6 +4648,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case 
ixgbe_mbox_api_14: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; @@ -4648,6 +4684,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); + ixgbevf_init_ipsec_offload(adapter); ixgbevf_init_last_counter_stats(adapter); @@ -4714,6 +4751,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); + ixgbevf_stop_ipsec_offload(adapter); ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index bfd9ae150808..853796c8ef0e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -62,6 +62,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -92,6 +93,10 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_UPDATE_XCAST_MODE 0x0c +/* mailbox API, version 1.4 VF requests */ +#define IXGBE_VF_IPSEC_ADD 0x0d +#define IXGBE_VF_IPSEC_DEL 0x0e + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index bf0577e819e1..cd3b81300cc7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -309,6 +309,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) * is not supported for this device type. */ switch (hw->api_version) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) @@ -376,6 +377,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) * or if the operation is not supported for this device type. 
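The ixgbe_mbox_api_14 additions in these hunks rely on the VF trying the newest mailbox revision first and falling back until the PF accepts one. Below is a simplified sketch of that negotiation loop; it uses plain integers in place of the enum constants and a stand-in pf_accepts() for the real mailbox round-trip, both of which are assumptions for illustration.

#include <stdio.h>

/*
 * Stand-in for the PF side of the negotiation; here it is assumed to know
 * nothing newer than revision 1.3, so the 1.4 request is refused.
 */
static int pf_accepts(int api)
{
	return api <= 13;
}

int main(void)
{
	/* preference order mirrors ixgbevf_negotiate_api(): newest first,
	 * with 0 standing in for ixgbe_mbox_api_unknown as the terminator */
	int api[] = { 14, 13, 12, 11, 10, 0 };
	int i = 0, negotiated = 0;

	while (api[i] != 0) {
		if (pf_accepts(api[i])) {	/* mailbox round-trip in the driver */
			negotiated = api[i];
			break;
		}
		i++;
	}

	printf("negotiated mailbox API 1.%d\n", negotiated % 10);
	return 0;
}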
*/ switch (hw->api_version) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) @@ -540,6 +542,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; /* Fall threw */ + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: break; default: @@ -890,6 +893,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: break; default: return 0; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index e08301d833e2..32ac9045cdae 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -365,15 +365,8 @@ ltq_etop_mdio_probe(struct net_device *dev) return PTR_ERR(phydev); } - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); + phy_attached_info(phydev); return 0; @@ -439,6 +432,7 @@ ltq_etop_open(struct net_device *dev) if (!IS_TX(i) && (!IS_RX(i))) continue; ltq_dma_open(&ch->dma); + ltq_dma_enable_irq(&ch->dma); napi_enable(&ch->napi); } phy_start(dev->phydev); diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c new file mode 100644 index 000000000000..8c5ba4b81fb7 --- /dev/null +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -0,0 +1,567 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lantiq / Intel PMAC driver for XRX200 SoCs + * + * Copyright (C) 2010 Lantiq Deutschland + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> + */ + +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/delay.h> + +#include <linux/of_net.h> +#include <linux/of_platform.h> + +#include <xway_dma.h> + +/* DMA */ +#define XRX200_DMA_DATA_LEN 0x600 +#define XRX200_DMA_RX 0 +#define XRX200_DMA_TX 1 + +/* cpu port mac */ +#define PMAC_RX_IPG 0x0024 +#define PMAC_RX_IPG_MASK 0xf + +#define PMAC_HD_CTL 0x0000 +/* Add Ethernet header to packets from DMA to PMAC */ +#define PMAC_HD_CTL_ADD BIT(0) +/* Add VLAN tag to Packets from DMA to PMAC */ +#define PMAC_HD_CTL_TAG BIT(1) +/* Add CRC to packets from DMA to PMAC */ +#define PMAC_HD_CTL_AC BIT(2) +/* Add status header to packets from PMAC to DMA */ +#define PMAC_HD_CTL_AS BIT(3) +/* Remove CRC from packets from PMAC to DMA */ +#define PMAC_HD_CTL_RC BIT(4) +/* Remove Layer-2 header from packets from PMAC to DMA */ +#define PMAC_HD_CTL_RL2 BIT(5) +/* Status header is present from DMA to PMAC */ +#define PMAC_HD_CTL_RXSH BIT(6) +/* Add special tag from PMAC to switch */ +#define PMAC_HD_CTL_AST BIT(7) +/* Remove specail Tag from PMAC to DMA */ +#define PMAC_HD_CTL_RST BIT(8) +/* Check CRC from DMA to PMAC */ +#define PMAC_HD_CTL_CCRC BIT(9) +/* Enable reaction to Pause frames in the PMAC */ +#define PMAC_HD_CTL_FC BIT(10) + +struct xrx200_chan { + int tx_free; + + struct napi_struct napi; + struct ltq_dma_channel dma; + struct sk_buff *skb[LTQ_DESC_NUM]; + + struct xrx200_priv *priv; +}; + +struct xrx200_priv { + struct clk *clk; + + struct xrx200_chan chan_tx; + struct xrx200_chan chan_rx; + + struct net_device *net_dev; + struct 
device *dev; + + __iomem void *pmac_reg; +}; + +static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset) +{ + return __raw_readl(priv->pmac_reg + offset); +} + +static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset) +{ + __raw_writel(val, priv->pmac_reg + offset); +} + +static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set, + u32 offset) +{ + u32 val = xrx200_pmac_r32(priv, offset); + + val &= ~(clear); + val |= set; + xrx200_pmac_w32(priv, val, offset); +} + +/* drop all the packets from the DMA ring */ +static void xrx200_flush_dma(struct xrx200_chan *ch) +{ + int i; + + for (i = 0; i < LTQ_DESC_NUM; i++) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C) + break; + + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | + XRX200_DMA_DATA_LEN; + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + } +} + +static int xrx200_open(struct net_device *net_dev) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + + napi_enable(&priv->chan_tx.napi); + ltq_dma_open(&priv->chan_tx.dma); + ltq_dma_enable_irq(&priv->chan_tx.dma); + + napi_enable(&priv->chan_rx.napi); + ltq_dma_open(&priv->chan_rx.dma); + /* The boot loader does not always deactivate the receiving of frames + * on the ports and then some packets queue up in the PPE buffers. + * They already passed the PMAC so they do not have the tags + * configured here. Read the these packets here and drop them. + * The HW should have written them into memory after 10us + */ + usleep_range(20, 40); + xrx200_flush_dma(&priv->chan_rx); + ltq_dma_enable_irq(&priv->chan_rx.dma); + + netif_wake_queue(net_dev); + + return 0; +} + +static int xrx200_close(struct net_device *net_dev) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + + netif_stop_queue(net_dev); + + napi_disable(&priv->chan_rx.napi); + ltq_dma_close(&priv->chan_rx.dma); + + napi_disable(&priv->chan_tx.napi); + ltq_dma_close(&priv->chan_tx.dma); + + return 0; +} + +static int xrx200_alloc_skb(struct xrx200_chan *ch) +{ + int ret = 0; + + ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev, + XRX200_DMA_DATA_LEN); + if (!ch->skb[ch->dma.desc]) { + ret = -ENOMEM; + goto skip; + } + + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev, + ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ch->priv->dev, + ch->dma.desc_base[ch->dma.desc].addr))) { + dev_kfree_skb_any(ch->skb[ch->dma.desc]); + ret = -ENOMEM; + goto skip; + } + +skip: + ch->dma.desc_base[ch->dma.desc].ctl = + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | + XRX200_DMA_DATA_LEN; + + return ret; +} + +static int xrx200_hw_receive(struct xrx200_chan *ch) +{ + struct xrx200_priv *priv = ch->priv; + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + struct sk_buff *skb = ch->skb[ch->dma.desc]; + int len = (desc->ctl & LTQ_DMA_SIZE_MASK); + struct net_device *net_dev = priv->net_dev; + int ret; + + ret = xrx200_alloc_skb(ch); + + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + + if (ret) { + netdev_err(net_dev, "failed to allocate new rx buffer\n"); + return ret; + } + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, net_dev); + netif_receive_skb(skb); + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += len - ETH_FCS_LEN; + + return 0; +} + +static int xrx200_poll_rx(struct napi_struct *napi, int budget) +{ + struct xrx200_chan *ch = container_of(napi, + struct xrx200_chan, napi); + 
int rx = 0; + int ret; + + while (rx < budget) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + ret = xrx200_hw_receive(ch); + if (ret) + return ret; + rx++; + } else { + break; + } + } + + if (rx < budget) { + napi_complete(&ch->napi); + ltq_dma_enable_irq(&ch->dma); + } + + return rx; +} + +static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget) +{ + struct xrx200_chan *ch = container_of(napi, + struct xrx200_chan, napi); + struct net_device *net_dev = ch->priv->net_dev; + int pkts = 0; + int bytes = 0; + + while (pkts < budget) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + struct sk_buff *skb = ch->skb[ch->tx_free]; + + pkts++; + bytes += skb->len; + ch->skb[ch->tx_free] = NULL; + consume_skb(skb); + memset(&ch->dma.desc_base[ch->tx_free], 0, + sizeof(struct ltq_dma_desc)); + ch->tx_free++; + ch->tx_free %= LTQ_DESC_NUM; + } else { + break; + } + } + + net_dev->stats.tx_packets += pkts; + net_dev->stats.tx_bytes += bytes; + netdev_completed_queue(ch->priv->net_dev, pkts, bytes); + + if (pkts < budget) { + napi_complete(&ch->napi); + ltq_dma_enable_irq(&ch->dma); + } + + return pkts; +} + +static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + struct xrx200_chan *ch = &priv->chan_tx; + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + u32 byte_offset; + dma_addr_t mapping; + int len; + + skb->dev = net_dev; + if (skb_put_padto(skb, ETH_ZLEN)) { + net_dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + len = skb->len; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { + netdev_err(net_dev, "tx ring full\n"); + netif_stop_queue(net_dev); + return NETDEV_TX_BUSY; + } + + ch->skb[ch->dma.desc] = skb; + + mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, mapping))) + goto err_drop; + + /* dma needs to start on a 16 byte aligned address */ + byte_offset = mapping % 16; + + desc->addr = mapping - byte_offset; + /* Make sure the address is written before we give it to HW */ + wmb(); + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | + LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + if (ch->dma.desc == ch->tx_free) + netif_stop_queue(net_dev); + + netdev_sent_queue(net_dev, len); + + return NETDEV_TX_OK; + +err_drop: + dev_kfree_skb(skb); + net_dev->stats.tx_dropped++; + net_dev->stats.tx_errors++; + return NETDEV_TX_OK; +} + +static const struct net_device_ops xrx200_netdev_ops = { + .ndo_open = xrx200_open, + .ndo_stop = xrx200_close, + .ndo_start_xmit = xrx200_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static irqreturn_t xrx200_dma_irq(int irq, void *ptr) +{ + struct xrx200_chan *ch = ptr; + + ltq_dma_disable_irq(&ch->dma); + ltq_dma_ack_irq(&ch->dma); + + napi_schedule(&ch->napi); + + return IRQ_HANDLED; +} + +static int xrx200_dma_init(struct xrx200_priv *priv) +{ + struct xrx200_chan *ch_rx = &priv->chan_rx; + struct xrx200_chan *ch_tx = &priv->chan_tx; + int ret = 0; + int i; + + ltq_dma_init_port(DMA_PORT_ETOP); + + ch_rx->dma.nr = XRX200_DMA_RX; + ch_rx->dma.dev = priv->dev; + ch_rx->priv = priv; + + ltq_dma_alloc_rx(&ch_rx->dma); + for (ch_rx->dma.desc = 0; 
ch_rx->dma.desc < LTQ_DESC_NUM; + ch_rx->dma.desc++) { + ret = xrx200_alloc_skb(ch_rx); + if (ret) + goto rx_free; + } + ch_rx->dma.desc = 0; + ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0, + "xrx200_net_rx", &priv->chan_rx); + if (ret) { + dev_err(priv->dev, "failed to request RX irq %d\n", + ch_rx->dma.irq); + goto rx_ring_free; + } + + ch_tx->dma.nr = XRX200_DMA_TX; + ch_tx->dma.dev = priv->dev; + ch_tx->priv = priv; + + ltq_dma_alloc_tx(&ch_tx->dma); + ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0, + "xrx200_net_tx", &priv->chan_tx); + if (ret) { + dev_err(priv->dev, "failed to request TX irq %d\n", + ch_tx->dma.irq); + goto tx_free; + } + + return ret; + +tx_free: + ltq_dma_free(&ch_tx->dma); + +rx_ring_free: + /* free the allocated RX ring */ + for (i = 0; i < LTQ_DESC_NUM; i++) { + if (priv->chan_rx.skb[i]) + dev_kfree_skb_any(priv->chan_rx.skb[i]); + } + +rx_free: + ltq_dma_free(&ch_rx->dma); + return ret; +} + +static void xrx200_hw_cleanup(struct xrx200_priv *priv) +{ + int i; + + ltq_dma_free(&priv->chan_tx.dma); + ltq_dma_free(&priv->chan_rx.dma); + + /* free the allocated RX ring */ + for (i = 0; i < LTQ_DESC_NUM; i++) + dev_kfree_skb_any(priv->chan_rx.skb[i]); +} + +static int xrx200_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct xrx200_priv *priv; + struct net_device *net_dev; + const u8 *mac; + int err; + + /* alloc the network device */ + net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv)); + if (!net_dev) + return -ENOMEM; + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + priv->dev = dev; + + net_dev->netdev_ops = &xrx200_netdev_ops; + SET_NETDEV_DEV(net_dev, dev); + net_dev->min_mtu = ETH_ZLEN; + net_dev->max_mtu = XRX200_DMA_DATA_LEN; + + /* load the memory ranges */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "failed to get resources\n"); + return -ENOENT; + } + + priv->pmac_reg = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->pmac_reg)) { + dev_err(dev, "failed to request and remap io ranges\n"); + return PTR_ERR(priv->pmac_reg); + } + + priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx"); + if (priv->chan_rx.dma.irq < 0) { + dev_err(dev, "failed to get RX IRQ, %i\n", + priv->chan_rx.dma.irq); + return -ENOENT; + } + priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx"); + if (priv->chan_tx.dma.irq < 0) { + dev_err(dev, "failed to get TX IRQ, %i\n", + priv->chan_tx.dma.irq); + return -ENOENT; + } + + /* get the clock */ + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(priv->clk); + } + + mac = of_get_mac_address(np); + if (mac && is_valid_ether_addr(mac)) + ether_addr_copy(net_dev->dev_addr, mac); + else + eth_hw_addr_random(net_dev); + + /* bring up the dma engine and IP core */ + err = xrx200_dma_init(priv); + if (err) + return err; + + /* enable clock gate */ + err = clk_prepare_enable(priv->clk); + if (err) + goto err_uninit_dma; + + /* set IPG to 12 */ + xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG); + + /* enable status header, enable CRC */ + xrx200_pmac_mask(priv, 0, + PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | + PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC, + PMAC_HD_CTL); + + /* setup NAPI */ + netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32); + netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 
32); + + platform_set_drvdata(pdev, priv); + + err = register_netdev(net_dev); + if (err) + goto err_unprepare_clk; + return err; + +err_unprepare_clk: + clk_disable_unprepare(priv->clk); + +err_uninit_dma: + xrx200_hw_cleanup(priv); + + return 0; +} + +static int xrx200_remove(struct platform_device *pdev) +{ + struct xrx200_priv *priv = platform_get_drvdata(pdev); + struct net_device *net_dev = priv->net_dev; + + /* free stack related instances */ + netif_stop_queue(net_dev); + netif_napi_del(&priv->chan_tx.napi); + netif_napi_del(&priv->chan_rx.napi); + + /* remove the actual device */ + unregister_netdev(net_dev); + + /* release the clock */ + clk_disable_unprepare(priv->clk); + + /* shut down hardware */ + xrx200_hw_cleanup(priv); + + return 0; +} + +static const struct of_device_id xrx200_match[] = { + { .compatible = "lantiq,xrx200-net" }, + {}, +}; +MODULE_DEVICE_TABLE(of, xrx200_match); + +static struct platform_driver xrx200_driver = { + .probe = xrx200_probe, + .remove = xrx200_remove, + .driver = { + .name = "lantiq,xrx200-net", + .of_match_table = xrx200_match, + }, +}; + +module_platform_driver(xrx200_driver); + +MODULE_AUTHOR("John Crispin <john@phrozen.org>"); +MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index f33fd22b351c..3238aa7f5dac 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -167,4 +167,7 @@ config SKY2_DEBUG If unsure, say N. + +source "drivers/net/ethernet/marvell/octeontx2/Kconfig" + endif # NET_VENDOR_MARVELL diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 55d4d10aa7d3..89dea7284d5b 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_MVPP2) += mvpp2/ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o +obj-y += octeontx2/ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 62f204f32316..1e9bcbdc6a90 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2733,17 +2733,17 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, memset(&res, 0, sizeof(res)); if (of_irq_to_resource(pnp, 0, &res) <= 0) { - dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); + dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp); return -EINVAL; } if (of_property_read_u32(pnp, "reg", &ppd.port_number)) { - dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name); + dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp); return -EINVAL; } if (ppd.port_number >= 3) { - dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name); + dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp); return -EINVAL; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b4ed7d394d07..5bfd349bf41a 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -221,6 +221,8 @@ #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) +#define MVNETA_GMAC_CTRL_4 0x2c90 +#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1) #define MVNETA_MIB_COUNTERS_BASE 0x3000 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 @@ -2064,10 +2066,7 @@ static 
int mvneta_rx_swbm(struct napi_struct *napi, /* Linux processing */ rxq->skb->protocol = eth_type_trans(rxq->skb, dev); - if (dev->features & NETIF_F_GRO) - napi_gro_receive(napi, rxq->skb); - else - netif_receive_skb(rxq->skb); + napi_gro_receive(napi, rxq->skb); /* clean uncomplete skb pointer in queue */ rxq->skb = NULL; @@ -2395,7 +2394,7 @@ error: } /* Main tx processing */ -static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); @@ -2509,12 +2508,13 @@ static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) { struct mvneta_tx_queue *txq; struct netdev_queue *nq; + int cpu = smp_processor_id(); while (cause_tx_done) { txq = mvneta_tx_done_policy(pp, cause_tx_done); nq = netdev_get_tx_queue(pp->dev, txq->id); - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq, cpu); if (txq->count) mvneta_txq_done(pp, txq); @@ -3343,6 +3343,7 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, if (state->interface != PHY_INTERFACE_MODE_NA && state->interface != PHY_INTERFACE_MODE_QSGMII && state->interface != PHY_INTERFACE_MODE_SGMII && + state->interface != PHY_INTERFACE_MODE_2500BASEX && !phy_interface_mode_is_8023z(state->interface) && !phy_interface_mode_is_rgmii(state->interface)) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -3355,9 +3356,15 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, /* Asymmetric pause is unsupported */ phylink_set(mask, Pause); - /* Half-duplex at speeds higher than 100Mbit is unsupported */ - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); + + /* We cannot use 1Gbps when using the 2.5G interface. 
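The mvneta_validate() change in the lines that follow restricts the advertised link modes by interface type: 2.5G modes only on 2500BASE-X, 1G modes otherwise, and 10/100 only outside the 802.3z modes. The sketch below condenses that masking; the mode bits and interface identifiers are simplified placeholders rather than the ethtool link-mode bitmap the real code manipulates.

#include <stdint.h>
#include <stdio.h>

/* Simplified link-mode bits; the real code uses the ethtool link-mode bitmap. */
enum { M_10_100 = 1u << 0, M_1000 = 1u << 1, M_2500 = 1u << 2, M_PAUSE = 1u << 3 };
enum { IF_SGMII, IF_1000BASEX, IF_2500BASEX };

/*
 * Condensed version of the validate logic: 2.5G modes are offered only on
 * the 2500BASE-X interface, 1G modes otherwise, and 10/100 only outside
 * the 802.3z modes.
 */
static uint32_t validate_mask(int interface)
{
	uint32_t mask = M_PAUSE;

	if (interface == IF_2500BASEX)
		mask |= M_2500;
	else
		mask |= M_1000;

	if (interface == IF_SGMII)
		mask |= M_10_100;

	return mask;
}

int main(void)
{
	printf("sgmii=0x%x 1000base-x=0x%x 2500base-x=0x%x\n",
	       validate_mask(IF_SGMII), validate_mask(IF_1000BASEX),
	       validate_mask(IF_2500BASEX));
	return 0;
}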
*/ + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + } else { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + } if (!phy_interface_mode_is_8023z(state->interface)) { /* 10M and 100M are only supported in non-802.3z mode */ @@ -3418,12 +3425,14 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, struct mvneta_port *pp = netdev_priv(ndev); u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | MVNETA_GMAC2_PORT_RESET); + new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE; new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE | MVNETA_GMAC_INBAND_RESTART_AN | @@ -3456,7 +3465,7 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, if (state->duplex) new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; - if (state->speed == SPEED_1000) + if (state->speed == SPEED_1000 || state->speed == SPEED_2500) new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED; else if (state->speed == SPEED_100) new_an |= MVNETA_GMAC_CONFIG_MII_SPEED; @@ -3495,10 +3504,18 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, MVNETA_GMAC_FORCE_LINK_DOWN); } + /* When at 2.5G, the link partner can send frames with shortened + * preambles. + */ + if (state->speed == SPEED_2500) + new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; + if (new_ctrl0 != gmac_ctrl0) mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); if (new_ctrl2 != gmac_ctrl2) mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); + if (new_ctrl4 != gmac_ctrl4) + mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); if (new_clk != gmac_clk) mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); if (new_an != gmac_an) @@ -3792,9 +3809,6 @@ static int mvneta_open(struct net_device *dev) goto err_free_online_hp; } - /* In default link is down */ - netif_carrier_off(pp->dev); - ret = mvneta_mdio_probe(pp); if (ret < 0) { netdev_err(dev, "cannot probe MDIO bus\n"); @@ -4597,7 +4611,8 @@ static int mvneta_probe(struct platform_device *pdev) } } - dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO; + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_RXCSUM; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 67b9e81b7c02..176c6b56fdcc 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -253,7 +253,8 @@ #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) -#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \ + ((version) == MVPP21 ? 
0xffff : 0xff) #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) @@ -330,6 +331,7 @@ #define MVPP2_TXP_SCHED_ENQ_MASK 0xff #define MVPP2_TXP_SCHED_DISQ_OFFSET 8 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 +#define MVPP2_TXP_SCHED_FIXED_PRIO_REG 0x8014 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 #define MVPP2_TXP_SCHED_MTU_REG 0x801c #define MVPP2_TXP_MTU_MAX 0x7FFFF @@ -613,6 +615,7 @@ /* Port flags */ #define MVPP2_F_LOOPBACK BIT(0) +#define MVPP2_F_DT_COMPAT BIT(1) /* Marvell tag types */ enum mvpp2_tag_type { @@ -662,7 +665,7 @@ enum mvpp2_prs_l3_cast { #define MVPP21_ADDR_SPACE_SZ 0 #define MVPP22_ADDR_SPACE_SZ SZ_64K -#define MVPP2_MAX_THREADS 8 +#define MVPP2_MAX_THREADS 9 #define MVPP2_MAX_QVECS MVPP2_MAX_THREADS /* GMAC MIB Counters register definitions */ @@ -734,6 +737,11 @@ struct mvpp2 { int port_count; struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; + /* Number of Tx threads used */ + unsigned int nthreads; + /* Map of threads needing locking */ + unsigned long lock_map; + /* Aggregated TXQs */ struct mvpp2_tx_queue *aggr_txqs; @@ -823,6 +831,12 @@ struct mvpp2_port { /* Per-CPU port control */ struct mvpp2_port_pcpu __percpu *pcpu; + /* Protect the BM refills and the Tx paths when a thread is used on more + * than a single CPU. + */ + spinlock_t bm_lock[MVPP2_MAX_THREADS]; + spinlock_t tx_lock[MVPP2_MAX_THREADS]; + /* Flags */ unsigned long flags; @@ -969,7 +983,7 @@ struct mvpp2_txq_pcpu_buf { /* Per-CPU Tx queue control */ struct mvpp2_txq_pcpu { - int cpu; + unsigned int thread; /* Number of Tx DMA descriptors in the descriptor ring */ int size; @@ -1095,14 +1109,6 @@ struct mvpp2_bm_pool { void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data); u32 mvpp2_read(struct mvpp2 *priv, u32 offset); -u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset); - -void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data); -u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset); - -void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, - u32 data); - void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index a74002b43b51..14f9679c957c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -82,13 +82,19 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset) return readl(priv->swth_base[0] + offset); } -u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) +static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) { return readl_relaxed(priv->swth_base[0] + offset); } + +static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) +{ + return cpu % priv->nthreads; +} + /* These accessors should be used to access: * - * - per-CPU registers, where each CPU has its own copy of the + * - per-thread registers, where each thread has its own copy of the * register. 
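The per-thread rework in these mvpp2 hunks replaces per-CPU register access with a per-thread window, using the cpu-to-thread mapping defined just above (cpu modulo the number of threads); the list of registers reached through those windows continues below. The sketch reproduces that mapping and the 64K window arithmetic behind the per-thread bases; the base address, the CPU count, and the use of MVPP2_MAX_THREADS as the thread count are illustrative assumptions (the driver computes nthreads at probe time and maps each window separately).

#include <stdint.h>
#include <stdio.h>

#define MVPP2_MAX_THREADS	9		/* from mvpp2.h in this series */
#define ADDR_SPACE_SZ		0x10000		/* 64K per-thread window on PPv2.2 */

/* Mirror of mvpp2_cpu_to_thread(): fold CPU numbers onto the available threads. */
static unsigned int cpu_to_thread(unsigned int cpu, unsigned int nthreads)
{
	return cpu % nthreads;
}

/*
 * Each thread-scoped access goes through that thread's register window;
 * conceptually each window starts 64K further into the address space.
 * The base address used here is an arbitrary example value.
 */
static uintptr_t thread_window(uintptr_t base, unsigned int thread)
{
	return base + (uintptr_t)thread * ADDR_SPACE_SZ;
}

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < 12; cpu++)
		printf("cpu %2u -> thread %u (window base 0x%lx)\n", cpu,
		       cpu_to_thread(cpu, MVPP2_MAX_THREADS),
		       (unsigned long)thread_window(0x10000000,
				cpu_to_thread(cpu, MVPP2_MAX_THREADS)));
	return 0;
}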
* * MVPP2_BM_VIRT_ALLOC_REG @@ -104,8 +110,8 @@ u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) * MVPP2_TXQ_SENT_REG * MVPP2_RXQ_NUM_REG * - * - global registers that must be accessed through a specific CPU - * window, because they are related to an access to a per-CPU + * - global registers that must be accessed through a specific thread + * window, because they are related to an access to a per-thread * register * * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) @@ -122,28 +128,28 @@ u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) */ -void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, +static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { - writel(data, priv->swth_base[cpu] + offset); + writel(data, priv->swth_base[thread] + offset); } -u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, +static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl(priv->swth_base[cpu] + offset); + return readl(priv->swth_base[thread] + offset); } -void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, +static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { - writel_relaxed(data, priv->swth_base[cpu] + offset); + writel_relaxed(data, priv->swth_base[thread] + offset); } -static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, +static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl_relaxed(priv->swth_base[cpu] + offset); + return readl_relaxed(priv->swth_base[thread] + offset); } static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, @@ -385,17 +391,17 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, dma_addr_t *dma_addr, phys_addr_t *phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu()); - *dma_addr = mvpp2_percpu_read(priv, cpu, + *dma_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); - *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); + *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG); if (priv->hw_version == MVPP22) { u32 val; u32 dma_addr_highbits, phys_addr_highbits; - val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); + val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC); dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; @@ -626,7 +632,11 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, dma_addr_t buf_dma_addr, phys_addr_t buf_phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + unsigned long flags = 0; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->bm_lock[thread], flags); if (port->priv->hw_version == MVPP22) { u32 val = 0; @@ -640,7 +650,7 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP22_BM_ADDR_HIGH_RLS_REG, val); } @@ -649,11 +659,14 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, * descriptor. 
Instead of storing the virtual address, we * store the physical address */ - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); + if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->bm_lock[thread], flags); + put_cpu(); } @@ -886,7 +899,7 @@ static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); } -/* Mask the current CPU's Rx/Tx interrupts +/* Mask the current thread's Rx/Tx interrupts * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. */ @@ -894,11 +907,16 @@ static void mvpp2_interrupts_mask(void *arg) { struct mvpp2_port *port = arg; - mvpp2_percpu_write(port->priv, smp_processor_id(), + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); } -/* Unmask the current CPU's Rx/Tx interrupts. +/* Unmask the current thread's Rx/Tx interrupts. * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. */ @@ -907,12 +925,17 @@ static void mvpp2_interrupts_unmask(void *arg) struct mvpp2_port *port = arg; u32 val; + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + val = MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); if (port->has_tx_irqs) val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), val); } @@ -928,7 +951,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) if (mask) val = 0; else - val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22); for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *v = port->qvecs + i; @@ -936,7 +959,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) if (v->type != MVPP2_QUEUE_VECTOR_SHARED) continue; - mvpp2_percpu_write(port->priv, v->sw_thread_id, + mvpp2_thread_write(port->priv, v->sw_thread_id, MVPP2_ISR_RX_TX_MASK_REG(port->id), val); } } @@ -1425,6 +1448,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) tx_port_num); mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); + /* Set TXQ scheduling to Round-Robin */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); + /* Close bandwidth for all queues */ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { ptxq = mvpp2_txq_phys(port->id, queue); @@ -1624,7 +1650,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) { /* aggregated access - relevant TXQ number is written in TX desc */ - mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_AGGR_TXQ_UPDATE_REG, pending); } @@ -1634,14 +1661,15 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) * Called only from mvpp2_tx(), so migration is 
disabled, using * smp_processor_id() is OK. */ -static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, +static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, struct mvpp2_tx_queue *aggr_txq, int num) { if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { /* Update number of occupied aggregated Tx descriptors */ - int cpu = smp_processor_id(); - u32 val = mvpp2_read_relaxed(priv, - MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + unsigned int thread = + mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + u32 val = mvpp2_read_relaxed(port->priv, + MVPP2_AGGR_TXQ_STATUS_REG(thread)); aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; @@ -1657,16 +1685,17 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, * only by mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ -static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, +static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, int num) { + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2 *priv = port->priv; u32 val; - int cpu = smp_processor_id(); val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; - mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); + mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); - val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); + val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); return val & MVPP2_TXQ_RSVD_RSLT_MASK; } @@ -1674,12 +1703,13 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, /* Check if there are enough reserved descriptors for transmission. * If not, request chunk of reserved descriptors and check again. */ -static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, +static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_txq_pcpu *txq_pcpu, int num) { - int req, cpu, desc_count; + int req, desc_count; + unsigned int thread; if (txq_pcpu->reserved_num >= num) return 0; @@ -1690,10 +1720,10 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, desc_count = 0; /* Compute total of used descriptors */ - for_each_present_cpu(cpu) { + for (thread = 0; thread < port->priv->nthreads; thread++) { struct mvpp2_txq_pcpu *txq_pcpu_aux; - txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); desc_count += txq_pcpu_aux->count; desc_count += txq_pcpu_aux->reserved_num; } @@ -1702,10 +1732,10 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, desc_count += req; if (desc_count > - (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) + (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) return -ENOMEM; - txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); /* OK, the descriptor could have been updated: check again. */ if (txq_pcpu->reserved_num < num) @@ -1759,7 +1789,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, /* Get number of sent descriptors and decrement counter. * The number of sent descriptors is returned. 
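For reference, the reworked reservation check above sums count + reserved_num over software threads and then keeps one MVPP2_CPU_DESC_CHUNK of headroom per possible thread (MVPP2_MAX_THREADS) instead of per present CPU. The stand-alone sketch below shows just that arithmetic; the chunk size, ring size and usage figures are hypothetical placeholders, only the shape of the test comes from the hunk.

#include <stdio.h>

#define MAX_THREADS	9
#define DESC_CHUNK	64	/* hypothetical stand-in for MVPP2_CPU_DESC_CHUNK */

/* Returns the number of descriptors to reserve, or -1 when the request
 * would eat into the per-thread chunk headroom kept at the end of the ring.
 */
static int reserve_check(int txq_size, int used_total, int req)
{
	if (used_total + req > txq_size - MAX_THREADS * DESC_CHUNK)
		return -1;
	return req;
}

int main(void)
{
	int txq_size = 2048;	/* hypothetical Tx ring size */
	int used = 1300;	/* sum of count + reserved_num over all threads */

	printf("reserve 64  -> %d\n", reserve_check(txq_size, used, 64));
	printf("reserve 512 -> %d\n", reserve_check(txq_size, used, 512));
	return 0;
}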
- * Per-CPU access + * Per-thread access * * Called only from mvpp2_txq_done(), called from mvpp2_tx() * (migration disabled) and from the TX completion tasklet (migration @@ -1771,7 +1801,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, u32 val; /* Reading status reg resets transmitted descriptor counter */ - val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), + val = mvpp2_thread_read_relaxed(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(txq->id)); return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> @@ -1786,10 +1817,15 @@ static void mvpp2_txq_sent_counter_clear(void *arg) struct mvpp2_port *port = arg; int queue; + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + for (queue = 0; queue < port->ntxqs; queue++) { int id = port->txqs[queue]->id; - mvpp2_percpu_read(port->priv, smp_processor_id(), + mvpp2_thread_read(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(id)); } } @@ -1849,13 +1885,13 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal); put_cpu(); @@ -1865,15 +1901,15 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val; if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); put_cpu(); } @@ -1974,7 +2010,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); int tx_done; - if (txq_pcpu->cpu != smp_processor_id()) + if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); tx_done = mvpp2_txq_sent_desc_proc(port, txq); @@ -1990,7 +2026,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, } static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, - int cpu) + unsigned int thread) { struct mvpp2_tx_queue *txq; struct mvpp2_txq_pcpu *txq_pcpu; @@ -2001,7 +2037,7 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, if (!txq) break; - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); if (txq_pcpu->count) { mvpp2_txq_done(port, txq, txq_pcpu); @@ -2017,8 +2053,8 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, /* Allocate and initialize descriptors for aggr TXQ */ static int mvpp2_aggr_txq_init(struct 
platform_device *pdev, - struct mvpp2_tx_queue *aggr_txq, int cpu, - struct mvpp2 *priv) + struct mvpp2_tx_queue *aggr_txq, + unsigned int thread, struct mvpp2 *priv) { u32 txq_dma; @@ -2033,7 +2069,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, /* Aggr TXQ no reset WA */ aggr_txq->next_desc_to_proc = mvpp2_read(priv, - MVPP2_AGGR_TXQ_INDEX_REG(cpu)); + MVPP2_AGGR_TXQ_INDEX_REG(thread)); /* Set Tx descriptors queue starting address indirect * access @@ -2044,8 +2080,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, txq_dma = aggr_txq->descs_dma >> MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), MVPP2_AGGR_TXQ_SIZE); return 0; @@ -2056,8 +2092,8 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { + unsigned int thread; u32 rxq_dma; - int cpu; rxq->size = port->rx_ring_size; @@ -2074,15 +2110,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); /* Set Rx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); if (port->priv->hw_version == MVPP21) rxq_dma = rxq->descs_dma; else rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); put_cpu(); /* Set Offset */ @@ -2127,7 +2163,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, static void mvpp2_rxq_deinit(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu; + unsigned int thread; mvpp2_rxq_drop_pkts(port, rxq); @@ -2146,10 +2182,10 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port, * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); put_cpu(); } @@ -2158,7 +2194,8 @@ static int mvpp2_txq_init(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { u32 val; - int cpu, desc, desc_per_txq, tx_port_num; + unsigned int thread; + int desc, desc_per_txq, tx_port_num; struct mvpp2_txq_pcpu *txq_pcpu; txq->size = port->tx_ring_size; @@ -2173,18 +2210,18 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq->last_desc = txq->size - 1; /* Set Tx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, 
MVPP2_TXQ_DESC_ADDR_REG, + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, txq->size & MVPP2_TXQ_DESC_SIZE_MASK); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); val &= ~MVPP2_TXQ_PENDING_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); /* Calculate base address in prefetch buffer. We reserve 16 descriptors * for each existing TXQ. @@ -2195,7 +2232,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + (txq->log_id * desc_per_txq); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); put_cpu(); @@ -2214,8 +2251,8 @@ static int mvpp2_txq_init(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), val); - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); txq_pcpu->size = txq->size; txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, sizeof(*txq_pcpu->buffs), @@ -2249,10 +2286,10 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int cpu; + unsigned int thread; - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); kfree(txq_pcpu->buffs); if (txq_pcpu->tso_headers) @@ -2278,10 +2315,10 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); /* Set Tx descriptors queue starting address and size */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); put_cpu(); } @@ -2289,14 +2326,14 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int delay, pending, cpu; + int delay, pending; + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val; - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, 
txq->id); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); /* The napi queue has been stopped so wait for all packets * to be transmitted. @@ -2312,17 +2349,17 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) mdelay(1); delay++; - pending = mvpp2_percpu_read(port->priv, cpu, + pending = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); pending &= MVPP2_TXQ_PENDING_MASK; } while (pending); val &= ~MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); put_cpu(); - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); /* Release all packets */ mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); @@ -2389,13 +2426,17 @@ err_cleanup: static int mvpp2_setup_txqs(struct mvpp2_port *port) { struct mvpp2_tx_queue *txq; - int queue, err; + int queue, err, cpu; for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; err = mvpp2_txq_init(port, txq); if (err) goto err_cleanup; + + /* Assign this queue to a CPU */ + cpu = queue % num_present_cpus(); + netif_set_xps_queue(port->dev, cpumask_of(cpu), queue); } if (port->has_tx_irqs) { @@ -2503,16 +2544,20 @@ static void mvpp2_tx_proc_cb(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu; unsigned int tx_todo, cause; + port_pcpu = per_cpu_ptr(port->pcpu, + mvpp2_cpu_to_thread(port->priv, smp_processor_id())); + if (!netif_running(dev)) return; port_pcpu->timer_scheduled = false; /* Process all the Tx queues */ cause = (1 << port->ntxqs) - 1; - tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); + tx_todo = mvpp2_tx_done(port, cause, + mvpp2_cpu_to_thread(port->priv, smp_processor_id())); /* Set the timer in case not all the packets were processed */ if (tx_todo) @@ -2729,7 +2774,8 @@ static inline void tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_tx_desc *desc) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); dma_addr_t buf_dma_addr = mvpp2_txdesc_dma_addr_get(port, desc); @@ -2746,7 +2792,8 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, struct mvpp2_tx_queue *aggr_txq, struct mvpp2_tx_queue *txq) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); struct mvpp2_tx_desc *tx_desc; int i; dma_addr_t buf_dma_addr; @@ -2865,9 +2912,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, int i, len, descs = 0; /* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, - tso_count_descs(skb)) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, + if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port, 
txq, txq_pcpu, tso_count_descs(skb))) return 0; @@ -2907,21 +2953,28 @@ release: } /* Main tx processing */ -static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_queue *txq, *aggr_txq; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_desc *tx_desc; dma_addr_t buf_dma_addr; + unsigned long flags = 0; + unsigned int thread; int frags = 0; u16 txq_id; u32 tx_cmd; + thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + txq_id = skb_get_queue_mapping(skb); txq = port->txqs[txq_id]; - txq_pcpu = this_cpu_ptr(txq->pcpu); - aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + aggr_txq = &port->priv->aggr_txqs[thread]; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->tx_lock[thread], flags); if (skb_is_gso(skb)) { frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); @@ -2930,9 +2983,8 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) frags = skb_shinfo(skb)->nr_frags + 1; /* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, - txq_pcpu, frags)) { + if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { frags = 0; goto out; } @@ -2974,7 +3026,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) out: if (frags > 0) { - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); txq_pcpu->reserved_num -= frags; @@ -3004,11 +3056,14 @@ out: /* Set the timer in case not all frags were processed */ if (!port->has_tx_irqs && txq_pcpu->count <= frags && txq_pcpu->count > 0) { - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); mvpp2_timer_set(port_pcpu); } + if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->tx_lock[thread], flags); + return NETDEV_TX_OK; } @@ -3028,7 +3083,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); struct mvpp2_queue_vector *qv; - int cpu = smp_processor_id(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); qv = container_of(napi, struct mvpp2_queue_vector, napi); @@ -3042,7 +3097,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, + cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; @@ -3051,7 +3106,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) /* Clear the cause register */ mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); - mvpp2_percpu_write(port->priv, cpu, + mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_TX_CAUSE_REG(port->id), cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } @@ -3065,7 +3120,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) } /* Process RX packets */ - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx = cause_rx_tx & + 
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); cause_rx <<= qv->first_rxq; cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { @@ -3140,14 +3196,13 @@ static void mvpp2_start_dev(struct mvpp2_port *port) for (i = 0; i < port->nqvecs; i++) napi_enable(&port->qvecs[i].napi); - /* Enable interrupts on all CPUs */ + /* Enable interrupts on all threads */ mvpp2_interrupts_enable(port); if (port->priv->hw_version == MVPP22) mvpp22_mode_reconfigure(port); if (port->phylink) { - netif_carrier_off(port->dev); phylink_start(port->phylink); } else { /* Phylink isn't used as of now for ACPI, so the MAC has to be @@ -3170,7 +3225,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port) { int i; - /* Disable interrupts on all CPUs */ + /* Disable interrupts on all threads */ mvpp2_interrupts_disable(port); for (i = 0; i < port->nqvecs; i++) @@ -3250,9 +3305,18 @@ static int mvpp2_irqs_init(struct mvpp2_port *port) if (err) goto err; - if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) - irq_set_affinity_hint(qv->irq, - cpumask_of(qv->sw_thread_id)); + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { + unsigned long mask = 0; + unsigned int cpu; + + for_each_present_cpu(cpu) { + if (mvpp2_cpu_to_thread(port->priv, cpu) == + qv->sw_thread_id) + mask |= BIT(cpu); + } + + irq_set_affinity_hint(qv->irq, to_cpumask(&mask)); + } } return 0; @@ -3396,11 +3460,11 @@ static int mvpp2_stop(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_port_pcpu *port_pcpu; - int cpu; + unsigned int thread; mvpp2_stop_dev(port); - /* Mask interrupts on all CPUs */ + /* Mask interrupts on all threads */ on_each_cpu(mvpp2_interrupts_mask, port, 1); mvpp2_shared_interrupt_mask_unmask(port, true); @@ -3411,8 +3475,8 @@ static int mvpp2_stop(struct net_device *dev) mvpp2_irqs_deinit(port); if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread); hrtimer_cancel(&port_pcpu->tx_done_timer); port_pcpu->timer_scheduled = false; @@ -3557,7 +3621,7 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mvpp2_port *port = netdev_priv(dev); unsigned int start; - int cpu; + unsigned int cpu; for_each_possible_cpu(cpu) { struct mvpp2_pcpu_stats *cpu_stats; @@ -3984,12 +4048,18 @@ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, struct device_node *port_node) { + struct mvpp2 *priv = port->priv; struct mvpp2_queue_vector *v; int i, ret; - port->nqvecs = num_possible_cpus(); - if (queue_mode == MVPP2_QDIST_SINGLE_MODE) - port->nqvecs += 1; + switch (queue_mode) { + case MVPP2_QDIST_SINGLE_MODE: + port->nqvecs = priv->nthreads + 1; + break; + case MVPP2_QDIST_MULTI_MODE: + port->nqvecs = priv->nthreads; + break; + } for (i = 0; i < port->nqvecs; i++) { char irqname[16]; @@ -4001,7 +4071,10 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, v->sw_thread_id = i; v->sw_thread_mask = BIT(i); - snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + if (port->flags & MVPP2_F_DT_COMPAT) + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + else + snprintf(irqname, sizeof(irqname), "hif%d", i); if (queue_mode == MVPP2_QDIST_MULTI_MODE) { v->first_rxq = i * MVPP2_DEFAULT_RXQ; @@ -4011,7 +4084,9 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, v->first_rxq = 0; v->nrxqs = port->nrxqs; v->type = 
MVPP2_QUEUE_VECTOR_SHARED; - strncpy(irqname, "rx-shared", sizeof(irqname)); + + if (port->flags & MVPP2_F_DT_COMPAT) + strncpy(irqname, "rx-shared", sizeof(irqname)); } if (port_node) @@ -4088,7 +4163,8 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct device *dev = port->dev->dev.parent; struct mvpp2 *priv = port->priv; struct mvpp2_txq_pcpu *txq_pcpu; - int queue, cpu, err; + unsigned int thread; + int queue, err; /* Checks for hardware constraints */ if (port->first_rxq + port->nrxqs > @@ -4132,9 +4208,9 @@ static int mvpp2_port_init(struct mvpp2_port *port) txq->id = queue_phy_id; txq->log_id = queue; txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - txq_pcpu->cpu = cpu; + for (thread = 0; thread < priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + txq_pcpu->thread = thread; } port->txqs[queue] = txq; @@ -4207,24 +4283,51 @@ err_free_percpu: return err; } -/* Checks if the port DT description has the TX interrupts - * described. On PPv2.1, there are no such interrupts. On PPv2.2, - * there are available, but we need to keep support for old DTs. +static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, + unsigned long *flags) +{ + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3" }; + int i; + + for (i = 0; i < 5; i++) + if (of_property_match_string(port_node, "interrupt-names", + irqs[i]) < 0) + return false; + + *flags |= MVPP2_F_DT_COMPAT; + return true; +} + +/* Checks if the port dt description has the required Tx interrupts: + * - PPv2.1: there are no such interrupts. + * - PPv2.2: + * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] + * - The new ones have: "hifX" with X in [0..8] + * + * All those variants are supported to keep the backward compatibility. 
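Since several CPUs may now share one software thread, the affinity hint set in the mvpp2_irqs_init() hunk earlier has to collect every CPU whose mapping lands on a queue vector's sw_thread_id. Below is a stand-alone sketch of that mask construction with a hypothetical CPU count; in the driver the mask is built with BIT(cpu) over for_each_present_cpu() and handed to irq_set_affinity_hint().

#include <stdio.h>

#define MVPP2_MAX_THREADS	9

static unsigned long affinity_mask_for_thread(unsigned int ncpus,
					      unsigned int nthreads,
					      unsigned int thread)
{
	unsigned long mask = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < ncpus; cpu++)
		if (cpu % nthreads == thread)	/* mvpp2_cpu_to_thread() */
			mask |= 1UL << cpu;
	return mask;
}

int main(void)
{
	unsigned int ncpus = 16;	/* hypothetical */
	unsigned int nthreads = ncpus < MVPP2_MAX_THREADS ? ncpus : MVPP2_MAX_THREADS;

	printf("thread 0 affinity mask: 0x%lx\n",
	       affinity_mask_for_thread(ncpus, nthreads, 0));
	printf("thread 8 affinity mask: 0x%lx\n",
	       affinity_mask_for_thread(ncpus, nthreads, 8));
	return 0;
}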
*/ -static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, - struct device_node *port_node) +static bool mvpp2_port_has_irqs(struct mvpp2 *priv, + struct device_node *port_node, + unsigned long *flags) { - char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", - "tx-cpu2", "tx-cpu3" }; - int ret, i; + char name[5]; + int i; + + /* ACPI */ + if (!port_node) + return true; if (priv->hw_version == MVPP21) return false; - for (i = 0; i < 5; i++) { - ret = of_property_match_string(port_node, "interrupt-names", - irqs[i]); - if (ret < 0) + if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) + return true; + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + snprintf(name, 5, "hif%d", i); + if (of_property_match_string(port_node, "interrupt-names", + name) < 0) return false; } @@ -4601,23 +4704,21 @@ static int mvpp2_port_probe(struct platform_device *pdev, struct resource *res; struct phylink *phylink; char *mac_from = ""; - unsigned int ntxqs, nrxqs; + unsigned int ntxqs, nrxqs, thread; + unsigned long flags = 0; bool has_tx_irqs; u32 id; int features; int phy_mode; - int err, i, cpu; + int err, i; - if (port_node) { - has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); - } else { - has_tx_irqs = true; - queue_mode = MVPP2_QDIST_MULTI_MODE; + has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); + if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { + dev_err(&pdev->dev, + "not enough IRQs to support multi queue mode\n"); + return -EINVAL; } - if (!has_tx_irqs) - queue_mode = MVPP2_QDIST_SINGLE_MODE; - ntxqs = MVPP2_MAX_TXQ; if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); @@ -4665,6 +4766,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->nrxqs = nrxqs; port->priv = priv; port->has_tx_irqs = has_tx_irqs; + port->flags = flags; err = mvpp2_queue_vectors_init(port, port_node); if (err) @@ -4761,8 +4863,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, } if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread); hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); @@ -5046,13 +5148,13 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) } /* Allocate and initialize aggregated TXQs */ - priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), + priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, sizeof(*priv->aggr_txqs), GFP_KERNEL); if (!priv->aggr_txqs) return -ENOMEM; - for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { priv->aggr_txqs[i].id = i; priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); @@ -5099,7 +5201,7 @@ static int mvpp2_probe(struct platform_device *pdev) struct mvpp2 *priv; struct resource *res; void __iomem *base; - int i; + int i, shared; int err; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -5164,6 +5266,15 @@ static int mvpp2_probe(struct platform_device *pdev) mvpp2_setup_bm_pool(); + + priv->nthreads = min_t(unsigned int, num_present_cpus(), + MVPP2_MAX_THREADS); + + shared = num_present_cpus() - priv->nthreads; + if (shared > 0) + bitmap_fill(&priv->lock_map, + min_t(int, shared, MVPP2_MAX_THREADS)); + for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz; @@ -5338,7 +5449,7 @@ static int mvpp2_remove(struct platform_device *pdev) 
mvpp2_bm_pool_destroy(pdev, priv, bm_pool); } - for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; dma_free_coherent(&pdev->dev, diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig new file mode 100644 index 000000000000..35827bdf1878 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -0,0 +1,17 @@ +# +# Marvell OcteonTX2 drivers configuration +# + +config OCTEONTX2_MBOX + tristate + +config OCTEONTX2_AF + tristate "Marvell OcteonTX2 RVU Admin Function driver" + select OCTEONTX2_MBOX + depends on (64BIT && COMPILE_TEST) || ARM64 + depends on PCI + help + This driver supports Marvell's OcteonTX2 Resource Virtualization + Unit's admin function manager which manages all RVU HW resources + and provides a medium to other PF/VFs to configure HW. Should be + enabled for other RVU device drivers to work. diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile new file mode 100644 index 000000000000..e579dcd54c97 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell OcteonTX2 device drivers. +# + +obj-$(CONFIG_OCTEONTX2_AF) += af/ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile new file mode 100644 index 000000000000..06329acf9c2c --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell's OcteonTX2 RVU Admin Function driver +# + +obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o +obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o + +octeontx2_mbox-y := mbox.o +octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ + rvu_reg.o rvu_npc.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c new file mode 100644 index 000000000000..12db256c8c9f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -0,0 +1,721 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include "cgx.h" + +#define DRV_NAME "octeontx2-cgx" +#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver" + +/** + * struct lmac + * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion + * @cmd_lock: Lock to serialize the command interface + * @resp: command response + * @link_info: link related information + * @event_cb: callback for linkchange events + * @cmd_pend: flag set before new command is started + * flag cleared after command response is received + * @cgx: parent cgx port + * @lmac_id: lmac port id + * @name: lmac port name + */ +struct lmac { + wait_queue_head_t wq_cmd_cmplt; + struct mutex cmd_lock; + u64 resp; + struct cgx_link_user_info link_info; + struct cgx_event_cb event_cb; + bool cmd_pend; + struct cgx *cgx; + u8 lmac_id; + char *name; +}; + +struct cgx { + void __iomem *reg_base; + struct pci_dev *pdev; + u8 cgx_id; + u8 lmac_count; + struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; + struct list_head cgx_list; +}; + +static LIST_HEAD(cgx_list); + +/* Convert firmware speed encoding to user format(Mbps) */ +static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX]; + +/* Convert firmware lmac type encoding to string */ +static char *cgx_lmactype_string[LMAC_MODE_MAX]; + +/* Supported devices */ +static const struct pci_device_id cgx_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, + { 0, } /* end of table */ +}; + +MODULE_DEVICE_TABLE(pci, cgx_id_table); + +static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) +{ + writeq(val, cgx->reg_base + (lmac << 18) + offset); +} + +static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) +{ + return readq(cgx->reg_base + (lmac << 18) + offset); +} + +static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) +{ + if (!cgx || lmac_id >= MAX_LMAC_PER_CGX) + return NULL; + + return cgx->lmac_idmap[lmac_id]; +} + +int cgx_get_cgx_cnt(void) +{ + struct cgx *cgx_dev; + int count = 0; + + list_for_each_entry(cgx_dev, &cgx_list, cgx_list) + count++; + + return count; +} +EXPORT_SYMBOL(cgx_get_cgx_cnt); + +int cgx_get_lmac_cnt(void *cgxd) +{ + struct cgx *cgx = cgxd; + + if (!cgx) + return -ENODEV; + + return cgx->lmac_count; +} +EXPORT_SYMBOL(cgx_get_lmac_cnt); + +void *cgx_get_pdata(int cgx_id) +{ + struct cgx *cgx_dev; + + list_for_each_entry(cgx_dev, &cgx_list, cgx_list) { + if (cgx_dev->cgx_id == cgx_id) + return cgx_dev; + } + return NULL; +} +EXPORT_SYMBOL(cgx_get_pdata); + +/* Ensure the required lock for event queue(where asynchronous events are + * posted) is acquired before calling this API. Else an asynchronous event(with + * latest link status) can reach the destination before this function returns + * and could make the link status appear wrong. 
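As a side note on the new CGX accessors: cgx_read()/cgx_write() fold the LMAC index into the register address by shifting it left by 18, so each LMAC effectively gets its own 256 KiB window inside the CGX BAR. A small sketch of that address arithmetic; the two register offsets are the ones defined in cgx.h further down, everything else is illustrative.

#include <stdio.h>
#include <stdint.h>

#define CGXX_CMRX_CFG		0x00ULL
#define CGXX_CMRX_RX_DMAC_CTL0	0x1F8ULL

/* mirrors the offset math in cgx_read()/cgx_write() */
static uint64_t cgx_reg_offset(uint64_t lmac, uint64_t offset)
{
	return (lmac << 18) + offset;
}

int main(void)
{
	uint64_t lmac;

	for (lmac = 0; lmac < 4; lmac++)
		printf("lmac%llu: CMRX_CFG at +0x%llx, RX_DMAC_CTL0 at +0x%llx\n",
		       (unsigned long long)lmac,
		       (unsigned long long)cgx_reg_offset(lmac, CGXX_CMRX_CFG),
		       (unsigned long long)cgx_reg_offset(lmac, CGXX_CMRX_RX_DMAC_CTL0));
	return 0;
}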
+ */ +int cgx_get_link_info(void *cgxd, int lmac_id, + struct cgx_link_user_info *linfo) +{ + struct lmac *lmac = lmac_pdata(lmac_id, cgxd); + + if (!lmac) + return -ENODEV; + + *linfo = lmac->link_info; + return 0; +} +EXPORT_SYMBOL(cgx_get_link_info); + +static u64 mac2u64 (u8 *mac_addr) +{ + u64 mac = 0; + int index; + + for (index = ETH_ALEN - 1; index >= 0; index--) + mac |= ((u64)*mac_addr++) << (8 * index); + return mac; +} + +int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + u64 cfg; + + /* copy 6bytes from macaddr */ + /* memcpy(&cfg, mac_addr, 6); */ + + cfg = mac2u64 (mac_addr); + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)), + cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg |= CGX_DMAC_CTL0_CAM_ENABLE; + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return 0; +} +EXPORT_SYMBOL(cgx_lmac_addr_set); + +u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + u64 cfg; + + cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); + return cfg & CGX_RX_DMAC_ADR_MASK; +} +EXPORT_SYMBOL(cgx_lmac_addr_get); + +int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind) +{ + struct cgx *cgx = cgxd; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F)); + return 0; +} +EXPORT_SYMBOL(cgx_set_pkind); + +static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id) +{ + u64 cfg; + + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); + return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK; +} + +/* Configure CGX LMAC in internal loopback mode */ +int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u8 lmac_type; + u64 cfg; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + lmac_type = cgx_get_lmac_type(cgx, lmac_id); + if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) { + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL); + if (enable) + cfg |= CGXX_GMP_PCS_MRX_CTL_LBK; + else + cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK; + cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg); + } else { + cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1); + if (enable) + cfg |= CGXX_SPUX_CONTROL1_LBK; + else + cfg &= ~CGXX_SPUX_CONTROL1_LBK; + cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg); + } + return 0; +} +EXPORT_SYMBOL(cgx_lmac_internal_loopback); + +void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) +{ + struct cgx *cgx = cgx_get_pdata(cgx_id); + u64 cfg = 0; + + if (!cgx) + return; + + if (enable) { + /* Enable promiscuous mode on LMAC */ + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE); + cfg |= CGX_DMAC_BCAST_MODE; + cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); + cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + } else { + /* Disable promiscuous mode */ + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE; + cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + } +} +EXPORT_SYMBOL(cgx_lmac_promisc_config); + 
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) +{ + struct cgx *cgx = cgxd; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); + return 0; +} +EXPORT_SYMBOL(cgx_get_rx_stats); + +int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat) +{ + struct cgx *cgx = cgxd; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8)); + return 0; +} +EXPORT_SYMBOL(cgx_get_tx_stats); + +int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); + if (enable) + cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN; + else + cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN); + cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); + return 0; +} +EXPORT_SYMBOL(cgx_lmac_rx_tx_enable); + +/* CGX Firmware interface low level support */ +static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) +{ + struct cgx *cgx = lmac->cgx; + struct device *dev; + int err = 0; + u64 cmd; + + /* Ensure no other command is in progress */ + err = mutex_lock_interruptible(&lmac->cmd_lock); + if (err) + return err; + + /* Ensure command register is free */ + cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG); + if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) { + err = -EBUSY; + goto unlock; + } + + /* Update ownership in command request */ + req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req); + + /* Mark this lmac as pending, before we start */ + lmac->cmd_pend = true; + + /* Start command in hardware */ + cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req); + + /* Ensure command is completed without errors */ + if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend, + msecs_to_jiffies(CGX_CMD_TIMEOUT))) { + dev = &cgx->pdev->dev; + dev_err(dev, "cgx port %d:%d cmd timeout\n", + cgx->cgx_id, lmac->lmac_id); + err = -EIO; + goto unlock; + } + + /* we have a valid command response */ + smp_rmb(); /* Ensure the latest updates are visible */ + *resp = lmac->resp; + +unlock: + mutex_unlock(&lmac->cmd_lock); + + return err; +} + +static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp, + struct cgx *cgx, int lmac_id) +{ + struct lmac *lmac; + int err; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + err = cgx_fwi_cmd_send(req, resp, lmac); + + /* Check for valid response */ + if (!err) { + if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL) + return -EIO; + else + return 0; + } + + return err; +} + +static inline void cgx_link_usertable_init(void) +{ + cgx_speed_mbps[CGX_LINK_NONE] = 0; + cgx_speed_mbps[CGX_LINK_10M] = 10; + cgx_speed_mbps[CGX_LINK_100M] = 100; + cgx_speed_mbps[CGX_LINK_1G] = 1000; + cgx_speed_mbps[CGX_LINK_2HG] = 2500; + cgx_speed_mbps[CGX_LINK_5G] = 5000; + cgx_speed_mbps[CGX_LINK_10G] = 10000; + cgx_speed_mbps[CGX_LINK_20G] = 20000; + cgx_speed_mbps[CGX_LINK_25G] = 25000; + cgx_speed_mbps[CGX_LINK_40G] = 40000; + cgx_speed_mbps[CGX_LINK_50G] = 50000; + cgx_speed_mbps[CGX_LINK_100G] = 100000; + + cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII"; + cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI"; + cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI"; + cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R"; + cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R"; + cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII"; + cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R"; + 
cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R"; + cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R"; + cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII"; +} + +static inline void link_status_user_format(u64 lstat, + struct cgx_link_user_info *linfo, + struct cgx *cgx, u8 lmac_id) +{ + char *lmac_string; + + linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); + linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); + linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)]; + linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id); + lmac_string = cgx_lmactype_string[linfo->lmac_type_id]; + strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1); +} + +/* Hardware event handlers */ +static inline void cgx_link_change_handler(u64 lstat, + struct lmac *lmac) +{ + struct cgx_link_user_info *linfo; + struct cgx *cgx = lmac->cgx; + struct cgx_link_event event; + struct device *dev; + int err_type; + + dev = &cgx->pdev->dev; + + link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id); + err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat); + + event.cgx_id = cgx->cgx_id; + event.lmac_id = lmac->lmac_id; + + /* update the local copy of link status */ + lmac->link_info = event.link_uinfo; + linfo = &lmac->link_info; + + if (!lmac->event_cb.notify_link_chg) { + dev_dbg(dev, "cgx port %d:%d Link change handler null", + cgx->cgx_id, lmac->lmac_id); + if (err_type != CGX_ERR_NONE) { + dev_err(dev, "cgx port %d:%d Link error %d\n", + cgx->cgx_id, lmac->lmac_id, err_type); + } + dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n", + cgx->cgx_id, lmac->lmac_id, + linfo->link_up ? "UP" : "DOWN", linfo->speed); + return; + } + + if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data)) + dev_err(dev, "event notification failure\n"); +} + +static inline bool cgx_cmdresp_is_linkevent(u64 event) +{ + u8 id; + + id = FIELD_GET(EVTREG_ID, event); + if (id == CGX_CMD_LINK_BRING_UP || + id == CGX_CMD_LINK_BRING_DOWN) + return true; + else + return false; +} + +static inline bool cgx_event_is_linkevent(u64 event) +{ + if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE) + return true; + else + return false; +} + +static irqreturn_t cgx_fwi_event_handler(int irq, void *data) +{ + struct lmac *lmac = data; + struct cgx *cgx; + u64 event; + + cgx = lmac->cgx; + + event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); + + if (!FIELD_GET(EVTREG_ACK, event)) + return IRQ_NONE; + + switch (FIELD_GET(EVTREG_EVT_TYPE, event)) { + case CGX_EVT_CMD_RESP: + /* Copy the response. Since only one command is active at a + * time, there is no way a response can get overwritten + */ + lmac->resp = event; + /* Ensure response is updated before thread context starts */ + smp_wmb(); + + /* There wont be separate events for link change initiated from + * software; Hence report the command responses as events + */ + if (cgx_cmdresp_is_linkevent(event)) + cgx_link_change_handler(event, lmac); + + /* Release thread waiting for completion */ + lmac->cmd_pend = false; + wake_up_interruptible(&lmac->wq_cmd_cmplt); + break; + case CGX_EVT_ASYNC: + if (cgx_event_is_linkevent(event)) + cgx_link_change_handler(event, lmac); + break; + } + + /* Any new event or command response will be posted by firmware + * only after the current status is acked. + * Ack the interrupt register as well. 
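link_status_user_format() above is essentially a bit-field decode of the 64-bit status word the firmware posts: bit 9 is link-up, bit 10 full duplex, bits 14:11 the index into cgx_speed_mbps[], bits 24:15 the error type (see the RESP_LINKSTAT_* masks in cgx_fw_if.h further down). A stand-alone decoder sketch using a made-up status word.

#include <stdio.h>
#include <stdint.h>

/* extract bits hi..lo, mimicking FIELD_GET() on a GENMASK_ULL(hi, lo) */
static uint64_t bits(uint64_t v, int hi, int lo)
{
	return (v >> lo) & ((1ULL << (hi - lo + 1)) - 1);
}

int main(void)
{
	/* fabricated status word: link up, full duplex, speed index 6 (10G) */
	uint64_t lstat = (1ULL << 9) | (1ULL << 10) | (6ULL << 11);

	printf("link_up=%llu full_duplex=%llu speed_idx=%llu err_type=%llu\n",
	       (unsigned long long)bits(lstat, 9, 9),
	       (unsigned long long)bits(lstat, 10, 10),
	       (unsigned long long)bits(lstat, 14, 11),
	       (unsigned long long)bits(lstat, 24, 15));
	return 0;
}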
+ */ + cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); + cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT); + + return IRQ_HANDLED; +} + +/* APIs for PHY management using CGX firmware interface */ + +/* callback registration for hardware events like link change */ +int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id) +{ + struct cgx *cgx = cgxd; + struct lmac *lmac; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + lmac->event_cb = *cb; + + return 0; +} +EXPORT_SYMBOL(cgx_lmac_evh_register); + +static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) +{ + u64 req = 0; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); + return cgx_fwi_cmd_generic(req, resp, cgx, 0); +} + +static int cgx_lmac_verify_fwi_version(struct cgx *cgx) +{ + struct device *dev = &cgx->pdev->dev; + int major_ver, minor_ver; + u64 resp; + int err; + + if (!cgx->lmac_count) + return 0; + + err = cgx_fwi_read_version(&resp, cgx); + if (err) + return err; + + major_ver = FIELD_GET(RESP_MAJOR_VER, resp); + minor_ver = FIELD_GET(RESP_MINOR_VER, resp); + dev_dbg(dev, "Firmware command interface version = %d.%d\n", + major_ver, minor_ver); + if (major_ver != CGX_FIRMWARE_MAJOR_VER || + minor_ver != CGX_FIRMWARE_MINOR_VER) + return -EIO; + else + return 0; +} + +static int cgx_lmac_init(struct cgx *cgx) +{ + struct lmac *lmac; + int i, err; + + cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7; + if (cgx->lmac_count > MAX_LMAC_PER_CGX) + cgx->lmac_count = MAX_LMAC_PER_CGX; + + for (i = 0; i < cgx->lmac_count; i++) { + lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL); + if (!lmac) + return -ENOMEM; + lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); + if (!lmac->name) + return -ENOMEM; + sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); + lmac->lmac_id = i; + lmac->cgx = cgx; + init_waitqueue_head(&lmac->wq_cmd_cmplt); + mutex_init(&lmac->cmd_lock); + err = request_irq(pci_irq_vector(cgx->pdev, + CGX_LMAC_FWI + i * 9), + cgx_fwi_event_handler, 0, lmac->name, lmac); + if (err) + return err; + + /* Enable interrupt */ + cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, + FW_CGX_INT); + + /* Add reference */ + cgx->lmac_idmap[i] = lmac; + } + + return cgx_lmac_verify_fwi_version(cgx); +} + +static int cgx_lmac_exit(struct cgx *cgx) +{ + struct lmac *lmac; + int i; + + /* Free all lmac related resources */ + for (i = 0; i < cgx->lmac_count; i++) { + lmac = cgx->lmac_idmap[i]; + if (!lmac) + continue; + free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac); + kfree(lmac->name); + kfree(lmac); + } + + return 0; +} + +static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct cgx *cgx; + int err, nvec; + + cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL); + if (!cgx) + return -ENOMEM; + cgx->pdev = pdev; + + pci_set_drvdata(pdev, cgx); + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + /* MAP configuration registers */ + cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!cgx->reg_base) { + dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + nvec = CGX_NVEC; + err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); + if (err 
< 0 || err != nvec) { + dev_err(dev, "Request for %d msix vectors failed, err %d\n", + nvec, err); + goto err_release_regions; + } + + list_add(&cgx->cgx_list, &cgx_list); + cgx->cgx_id = cgx_get_cgx_cnt() - 1; + + cgx_link_usertable_init(); + + err = cgx_lmac_init(cgx); + if (err) + goto err_release_lmac; + + return 0; + +err_release_lmac: + cgx_lmac_exit(cgx); + list_del(&cgx->cgx_list); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void cgx_remove(struct pci_dev *pdev) +{ + struct cgx *cgx = pci_get_drvdata(pdev); + + cgx_lmac_exit(cgx); + list_del(&cgx->cgx_list); + pci_free_irq_vectors(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +struct pci_driver cgx_driver = { + .name = DRV_NAME, + .id_table = cgx_id_table, + .probe = cgx_probe, + .remove = cgx_remove, +}; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h new file mode 100644 index 000000000000..0a66d2717442 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CGX_H +#define CGX_H + +#include "mbox.h" +#include "cgx_fw_if.h" + + /* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_CGX 0xA059 + +/* PCI BAR nos */ +#define PCI_CFG_REG_BAR_NUM 0 + +#define MAX_CGX 3 +#define MAX_LMAC_PER_CGX 4 +#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) + +/* Registers */ +#define CGXX_CMRX_CFG 0x00 +#define CMR_EN BIT_ULL(55) +#define DATA_PKT_TX_EN BIT_ULL(53) +#define DATA_PKT_RX_EN BIT_ULL(54) +#define CGX_LMAC_TYPE_SHIFT 40 +#define CGX_LMAC_TYPE_MASK 0xF +#define CGXX_CMRX_INT 0x040 +#define FW_CGX_INT BIT_ULL(1) +#define CGXX_CMRX_INT_ENA_W1S 0x058 +#define CGXX_CMRX_RX_ID_MAP 0x060 +#define CGXX_CMRX_RX_STAT0 0x070 +#define CGXX_CMRX_RX_LMACS 0x128 +#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8 +#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) +#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) +#define CGX_DMAC_MCAST_MODE BIT_ULL(1) +#define CGX_DMAC_BCAST_MODE BIT_ULL(0) +#define CGXX_CMRX_RX_DMAC_CAM0 0x200 +#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) +#define CGXX_CMRX_RX_DMAC_CAM1 0x400 +#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) +#define CGXX_CMRX_TX_STAT0 0x700 +#define CGXX_SCRATCH0_REG 0x1050 +#define CGXX_SCRATCH1_REG 0x1058 +#define CGX_CONST 0x2000 +#define CGXX_SPUX_CONTROL1 0x10000 +#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14) +#define CGXX_GMP_PCS_MRX_CTL 0x30000 +#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14) + +#define CGX_COMMAND_REG CGXX_SCRATCH1_REG +#define CGX_EVENT_REG CGXX_SCRATCH0_REG +#define CGX_CMD_TIMEOUT 2200 /* msecs */ + +#define CGX_NVEC 37 +#define CGX_LMAC_FWI 0 + +enum LMAC_TYPE { + LMAC_MODE_SGMII = 0, + LMAC_MODE_XAUI = 1, + LMAC_MODE_RXAUI = 2, + LMAC_MODE_10G_R = 3, + LMAC_MODE_40G_R = 4, + LMAC_MODE_QSGMII = 6, + LMAC_MODE_25G_R = 7, + LMAC_MODE_50G_R = 8, + LMAC_MODE_100G_R = 9, + LMAC_MODE_USXGMII = 10, + LMAC_MODE_MAX, +}; + +struct cgx_link_event { + struct cgx_link_user_info link_uinfo; + u8 cgx_id; + u8 lmac_id; +}; + +/** + * struct cgx_event_cb + * @notify_link_chg: callback for link change notification + * @data: data passed to 
callback function + */ +struct cgx_event_cb { + int (*notify_link_chg)(struct cgx_link_event *event, void *data); + void *data; +}; + +extern struct pci_driver cgx_driver; + +int cgx_get_cgx_cnt(void); +int cgx_get_lmac_cnt(void *cgxd); +void *cgx_get_pdata(int cgx_id); +int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind); +int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id); +int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat); +int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); +int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); +int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id); +void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable); +int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable); +int cgx_get_link_info(void *cgxd, int lmac_id, + struct cgx_link_user_info *linfo); +#endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h new file mode 100644 index 000000000000..fa17af3f4ba7 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CGX_FW_INTF_H__ +#define __CGX_FW_INTF_H__ + +#include <linux/bitops.h> +#include <linux/bitfield.h> + +#define CGX_FIRMWARE_MAJOR_VER 1 +#define CGX_FIRMWARE_MINOR_VER 0 + +#define CGX_EVENT_ACK 1UL + +/* CGX error types. set for cmd response status as CGX_STAT_FAIL */ +enum cgx_error_type { + CGX_ERR_NONE, + CGX_ERR_LMAC_NOT_ENABLED, + CGX_ERR_LMAC_MODE_INVALID, + CGX_ERR_REQUEST_ID_INVALID, + CGX_ERR_PREV_ACK_NOT_CLEAR, + CGX_ERR_PHY_LINK_DOWN, + CGX_ERR_PCS_RESET_FAIL, + CGX_ERR_AN_CPT_FAIL, + CGX_ERR_TX_NOT_IDLE, + CGX_ERR_RX_NOT_IDLE, + CGX_ERR_SPUX_BR_BLKLOCK_FAIL, + CGX_ERR_SPUX_RX_ALIGN_FAIL, + CGX_ERR_SPUX_TX_FAULT, + CGX_ERR_SPUX_RX_FAULT, + CGX_ERR_SPUX_RESET_FAIL, + CGX_ERR_SPUX_AN_RESET_FAIL, + CGX_ERR_SPUX_USX_AN_RESET_FAIL, + CGX_ERR_SMUX_RX_LINK_NOT_OK, + CGX_ERR_PCS_RECV_LINK_FAIL, + CGX_ERR_TRAINING_FAIL, + CGX_ERR_RX_EQU_FAIL, + CGX_ERR_SPUX_BER_FAIL, + CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */ +}; + +/* LINK speed types */ +enum cgx_link_speed { + CGX_LINK_NONE, + CGX_LINK_10M, + CGX_LINK_100M, + CGX_LINK_1G, + CGX_LINK_2HG, + CGX_LINK_5G, + CGX_LINK_10G, + CGX_LINK_20G, + CGX_LINK_25G, + CGX_LINK_40G, + CGX_LINK_50G, + CGX_LINK_100G, + CGX_LINK_SPEED_MAX, +}; + +/* REQUEST ID types. 
Input to firmware */ +enum cgx_cmd_id { + CGX_CMD_NONE, + CGX_CMD_GET_FW_VER, + CGX_CMD_GET_MAC_ADDR, + CGX_CMD_SET_MTU, + CGX_CMD_GET_LINK_STS, /* optional to user */ + CGX_CMD_LINK_BRING_UP, + CGX_CMD_LINK_BRING_DOWN, + CGX_CMD_INTERNAL_LBK, + CGX_CMD_EXTERNAL_LBK, + CGX_CMD_HIGIG, + CGX_CMD_LINK_STATE_CHANGE, + CGX_CMD_MODE_CHANGE, /* hot plug support */ + CGX_CMD_INTF_SHUTDOWN, + CGX_CMD_IRQ_ENABLE, + CGX_CMD_IRQ_DISABLE, +}; + +/* async event ids */ +enum cgx_evt_id { + CGX_EVT_NONE, + CGX_EVT_LINK_CHANGE, +}; + +/* event types - cause of interrupt */ +enum cgx_evt_type { + CGX_EVT_ASYNC, + CGX_EVT_CMD_RESP +}; + +enum cgx_stat { + CGX_STAT_SUCCESS, + CGX_STAT_FAIL +}; + +enum cgx_cmd_own { + CGX_CMD_OWN_NS, + CGX_CMD_OWN_FIRMWARE, +}; + +/* m - bit mask + * y - value to be written in the bitrange + * x - input value whose bitrange to be modified + */ +#define FIELD_SET(m, y, x) \ + (((x) & ~(m)) | \ + FIELD_PREP((m), (y))) + +/* scratchx(0) CSR used for ATF->non-secure SW communication. + * This acts as the status register + * Provides details on command ack/status, command response, error details + */ +#define EVTREG_ACK BIT_ULL(0) +#define EVTREG_EVT_TYPE BIT_ULL(1) +#define EVTREG_STAT BIT_ULL(2) +#define EVTREG_ID GENMASK_ULL(8, 3) + +/* Response to command IDs with command status as CGX_STAT_FAIL + * + * Not applicable for commands : + * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE + */ +#define EVTREG_ERRTYPE GENMASK_ULL(18, 9) + +/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MAJOR_VER GENMASK_ULL(12, 9) +#define RESP_MINOR_VER GENMASK_ULL(16, 13) + +/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MAC_ADDR GENMASK_ULL(56, 9) + +/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE + * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS + * + * In case of CGX_STAT_FAIL, it indicates CGX configuration failed + * when processing link up/down/change command. + * Both err_type and current link status will be updated + * + * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current + * link status will be updated + */ +struct cgx_lnk_sts { + uint64_t reserved1:9; + uint64_t link_up:1; + uint64_t full_duplex:1; + uint64_t speed:4; /* cgx_link_speed */ + uint64_t err_type:10; + uint64_t reserved2:39; +}; + +#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9) +#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10) +#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11) +#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15) + +/* scratchx(1) CSR used for non-secure SW->ATF communication + * This CSR acts as a command register + */ +#define CMDREG_OWN BIT_ULL(0) +#define CMDREG_ID GENMASK_ULL(7, 2) + +/* Any command using enable/disable as an argument need + * to set this bitfield. + * Ex: Loopback, HiGig... 
+ */ +#define CMDREG_ENABLE BIT_ULL(8) + +/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */ +#define CMDMTU_SIZE GENMASK_ULL(23, 8) + +/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */ +#define CMDLINKCHANGE_LINKUP BIT_ULL(8) +#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9) +#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10) + +#endif /* __CGX_FW_INTF_H__ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h new file mode 100644 index 000000000000..d39ada404c8f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef COMMON_H +#define COMMON_H + +#include "rvu_struct.h" + +#define OTX2_ALIGN 128 /* Align to cacheline */ + +#define Q_SIZE_16 0ULL /* 16 entries */ +#define Q_SIZE_64 1ULL /* 64 entries */ +#define Q_SIZE_256 2ULL +#define Q_SIZE_1K 3ULL +#define Q_SIZE_4K 4ULL +#define Q_SIZE_16K 5ULL +#define Q_SIZE_64K 6ULL +#define Q_SIZE_256K 7ULL +#define Q_SIZE_1M 8ULL /* Million entries */ +#define Q_SIZE_MIN Q_SIZE_16 +#define Q_SIZE_MAX Q_SIZE_1M + +#define Q_COUNT(x) (16ULL << (2 * x)) +#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2) + +/* Admin queue info */ + +/* Since we intend to add only one instruction at a time, + * keep queue size to it's minimum. + */ +#define AQ_SIZE Q_SIZE_16 +/* HW head & tail pointer mask */ +#define AQ_PTR_MASK 0xFFFFF + +struct qmem { + void *base; + dma_addr_t iova; + int alloc_sz; + u8 entry_sz; + u8 align; + u32 qsize; +}; + +static inline int qmem_alloc(struct device *dev, struct qmem **q, + int qsize, int entry_sz) +{ + struct qmem *qmem; + int aligned_addr; + + if (!qsize) + return -EINVAL; + + *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL); + if (!*q) + return -ENOMEM; + qmem = *q; + + qmem->entry_sz = entry_sz; + qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN; + qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz, + &qmem->iova, GFP_KERNEL); + if (!qmem->base) + return -ENOMEM; + + qmem->qsize = qsize; + + aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN); + qmem->align = (aligned_addr - qmem->iova); + qmem->base += qmem->align; + qmem->iova += qmem->align; + return 0; +} + +static inline void qmem_free(struct device *dev, struct qmem *qmem) +{ + if (!qmem) + return; + + if (qmem->base) + dma_free_coherent(dev, qmem->alloc_sz, + qmem->base - qmem->align, + qmem->iova - qmem->align); + devm_kfree(dev, qmem); +} + +struct admin_queue { + struct qmem *inst; + struct qmem *res; + spinlock_t lock; /* Serialize inst enqueue from PFs */ +}; + +/* NPA aura count */ +enum npa_aura_sz { + NPA_AURA_SZ_0, + NPA_AURA_SZ_128, + NPA_AURA_SZ_256, + NPA_AURA_SZ_512, + NPA_AURA_SZ_1K, + NPA_AURA_SZ_2K, + NPA_AURA_SZ_4K, + NPA_AURA_SZ_8K, + NPA_AURA_SZ_16K, + NPA_AURA_SZ_32K, + NPA_AURA_SZ_64K, + NPA_AURA_SZ_128K, + NPA_AURA_SZ_256K, + NPA_AURA_SZ_512K, + NPA_AURA_SZ_1M, + NPA_AURA_SZ_MAX, +}; + +#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6)) + +/* NPA AQ result structure for init/read/write of aura HW contexts */ +struct npa_aq_aura_res { + struct npa_aq_res_s res; + struct npa_aura_s aura_ctx; + struct npa_aura_s ctx_mask; +}; + +/* NPA AQ result structure for init/read/write of pool HW contexts 
*/ +struct npa_aq_pool_res { + struct npa_aq_res_s res; + struct npa_pool_s pool_ctx; + struct npa_pool_s ctx_mask; +}; + +/* NIX Transmit schedulers */ +enum nix_scheduler { + NIX_TXSCH_LVL_SMQ = 0x0, + NIX_TXSCH_LVL_MDQ = 0x0, + NIX_TXSCH_LVL_TL4 = 0x1, + NIX_TXSCH_LVL_TL3 = 0x2, + NIX_TXSCH_LVL_TL2 = 0x3, + NIX_TXSCH_LVL_TL1 = 0x4, + NIX_TXSCH_LVL_CNT = 0x5, +}; + +/* NIX RX action operation*/ +#define NIX_RX_ACTIONOP_DROP (0x0ull) +#define NIX_RX_ACTIONOP_UCAST (0x1ull) +#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull) +#define NIX_RX_ACTIONOP_MCAST (0x3ull) +#define NIX_RX_ACTIONOP_RSS (0x4ull) + +/* NIX TX action operation*/ +#define NIX_TX_ACTIONOP_DROP (0x0ull) +#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull) +#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull) +#define NIX_TX_ACTIONOP_MCAST (0x3ull) +#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull) + +#define NPC_MCAM_KEY_X1 0 +#define NPC_MCAM_KEY_X2 1 +#define NPC_MCAM_KEY_X4 2 + +#define NIX_INTF_RX 0 +#define NIX_INTF_TX 1 + +#define NIX_INTF_TYPE_CGX 0 +#define NIX_INTF_TYPE_LBK 1 + +#define MAX_LMAC_PKIND 12 +#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b)) +#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) + +/* NIX LSO format indices. + * As of now TSO is the only one using, so statically assigning indices. + */ +#define NIX_LSO_FORMAT_IDX_TSOV4 0 +#define NIX_LSO_FORMAT_IDX_TSOV6 1 + +/* RSS info */ +#define MAX_RSS_GROUPS 8 +/* Group 0 has to be used in default pkt forwarding MCAM entries + * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple + * filters. + */ +#define DEFAULT_RSS_CONTEXT_GROUP 0 +#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */ + +/* NIX flow tag, key type flags */ +#define FLOW_KEY_TYPE_PORT BIT(0) +#define FLOW_KEY_TYPE_IPV4 BIT(1) +#define FLOW_KEY_TYPE_IPV6 BIT(2) +#define FLOW_KEY_TYPE_TCP BIT(3) +#define FLOW_KEY_TYPE_UDP BIT(4) +#define FLOW_KEY_TYPE_SCTP BIT(5) + +/* NIX flow tag algorithm indices, max is 31 */ +enum { + FLOW_KEY_ALG_PORT, + FLOW_KEY_ALG_IP, + FLOW_KEY_ALG_TCP, + FLOW_KEY_ALG_UDP, + FLOW_KEY_ALG_SCTP, + FLOW_KEY_ALG_TCP_UDP, + FLOW_KEY_ALG_TCP_SCTP, + FLOW_KEY_ALG_UDP_SCTP, + FLOW_KEY_ALG_TCP_UDP_SCTP, + FLOW_KEY_ALG_MAX, +}; + +#endif /* COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c new file mode 100644 index 000000000000..85ba24a05774 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> + +#include "rvu_reg.h" +#include "mbox.h" + +static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + +void otx2_mbox_reset(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *tx_hdr, *rx_hdr; + + tx_hdr = mdev->mbase + mbox->tx_start; + rx_hdr = mdev->mbase + mbox->rx_start; + + spin_lock(&mdev->mbox_lock); + mdev->msg_size = 0; + mdev->rsp_size = 0; + tx_hdr->num_msgs = 0; + rx_hdr->num_msgs = 0; + spin_unlock(&mdev->mbox_lock); +} +EXPORT_SYMBOL(otx2_mbox_reset); + +void otx2_mbox_destroy(struct otx2_mbox *mbox) +{ + mbox->reg_base = NULL; + mbox->hwbase = NULL; + + kfree(mbox->dev); + mbox->dev = NULL; +} +EXPORT_SYMBOL(otx2_mbox_destroy); + +int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs) +{ + struct otx2_mbox_dev *mdev; + int devid; + + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_PFVF: + mbox->tx_start = MBOX_DOWN_TX_START; + mbox->rx_start = MBOX_DOWN_RX_START; + mbox->tx_size = MBOX_DOWN_TX_SIZE; + mbox->rx_size = MBOX_DOWN_RX_SIZE; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_VFPF: + mbox->tx_start = MBOX_DOWN_RX_START; + mbox->rx_start = MBOX_DOWN_TX_START; + mbox->tx_size = MBOX_DOWN_RX_SIZE; + mbox->rx_size = MBOX_DOWN_TX_SIZE; + break; + case MBOX_DIR_AFPF_UP: + case MBOX_DIR_PFVF_UP: + mbox->tx_start = MBOX_UP_TX_START; + mbox->rx_start = MBOX_UP_RX_START; + mbox->tx_size = MBOX_UP_TX_SIZE; + mbox->rx_size = MBOX_UP_RX_SIZE; + break; + case MBOX_DIR_PFAF_UP: + case MBOX_DIR_VFPF_UP: + mbox->tx_start = MBOX_UP_RX_START; + mbox->rx_start = MBOX_UP_TX_START; + mbox->tx_size = MBOX_UP_RX_SIZE; + mbox->rx_size = MBOX_UP_TX_SIZE; + break; + default: + return -ENODEV; + } + + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_AFPF_UP: + mbox->trigger = RVU_AF_AFPF_MBOX0; + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_PFAF_UP: + mbox->trigger = RVU_PF_PFAF_MBOX1; + mbox->tr_shift = 0; + break; + case MBOX_DIR_PFVF: + case MBOX_DIR_PFVF_UP: + mbox->trigger = RVU_PF_VFX_PFVF_MBOX0; + mbox->tr_shift = 12; + break; + case MBOX_DIR_VFPF: + case MBOX_DIR_VFPF_UP: + mbox->trigger = RVU_VF_VFPF_MBOX1; + mbox->tr_shift = 0; + break; + default: + return -ENODEV; + } + + mbox->reg_base = reg_base; + mbox->hwbase = hwbase; + mbox->pdev = pdev; + + mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); + if (!mbox->dev) { + otx2_mbox_destroy(mbox); + return -ENOMEM; + } + + mbox->ndevs = ndevs; + for (devid = 0; devid < ndevs; devid++) { + mdev = &mbox->dev[devid]; + mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE); + spin_lock_init(&mdev->mbox_lock); + /* Init header to reset value */ + otx2_mbox_reset(mbox, devid); + } + + return 0; +} +EXPORT_SYMBOL(otx2_mbox_init); + +int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + int timeout = 0, sleep = 1; + + while (mdev->num_msgs != mdev->msgs_acked) { + msleep(sleep); + timeout += sleep; + if (timeout >= MBOX_RSP_TIMEOUT) + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); + +int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + unsigned long timeout = jiffies + 1 * HZ; + + while (!time_after(jiffies, timeout)) { + if (mdev->num_msgs == mdev->msgs_acked) + return 0; + cpu_relax(); + } + return 
-EIO; +} +EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp); + +void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *tx_hdr, *rx_hdr; + + tx_hdr = mdev->mbase + mbox->tx_start; + rx_hdr = mdev->mbase + mbox->rx_start; + + spin_lock(&mdev->mbox_lock); + /* Reset header for next messages */ + mdev->msg_size = 0; + mdev->rsp_size = 0; + mdev->msgs_acked = 0; + + /* Sync mbox data into memory */ + smp_wmb(); + + /* num_msgs != 0 signals to the peer that the buffer has a number of + * messages. So this should be written after writing all the messages + * to the shared memory. + */ + tx_hdr->num_msgs = mdev->num_msgs; + rx_hdr->num_msgs = 0; + spin_unlock(&mdev->mbox_lock); + + /* The interrupt should be fired after num_msgs is written + * to the shared memory + */ + writeq(1, (void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); +} +EXPORT_SYMBOL(otx2_mbox_msg_send); + +struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, + int size, int size_rsp) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_msghdr *msghdr = NULL; + + spin_lock(&mdev->mbox_lock); + size = ALIGN(size, MBOX_MSG_ALIGN); + size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN); + /* Check if there is space in mailbox */ + if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset) + goto exit; + if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset) + goto exit; + + if (mdev->msg_size == 0) + mdev->num_msgs = 0; + mdev->num_msgs++; + + msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size; + + /* Clear the whole msg region */ + memset(msghdr, 0, sizeof(*msghdr) + size); + /* Init message header with reset values */ + msghdr->ver = OTX2_MBOX_VERSION; + mdev->msg_size += size; + mdev->rsp_size += size_rsp; + msghdr->next_msgoff = mdev->msg_size + msgs_offset; +exit: + spin_unlock(&mdev->mbox_lock); + + return msghdr; +} +EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp); + +struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, + struct mbox_msghdr *msg) +{ + unsigned long imsg = mbox->tx_start + msgs_offset; + unsigned long irsp = mbox->rx_start + msgs_offset; + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + u16 msgs; + + if (mdev->num_msgs != mdev->msgs_acked) + return ERR_PTR(-ENODEV); + + for (msgs = 0; msgs < mdev->msgs_acked; msgs++) { + struct mbox_msghdr *pmsg = mdev->mbase + imsg; + struct mbox_msghdr *prsp = mdev->mbase + irsp; + + if (msg == pmsg) { + if (pmsg->id != prsp->id) + return ERR_PTR(-ENODEV); + return prsp; + } + + imsg = pmsg->next_msgoff; + irsp = prsp->next_msgoff; + } + + return ERR_PTR(-ENODEV); +} +EXPORT_SYMBOL(otx2_mbox_get_rsp); + +int +otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id) +{ + struct msg_rsp *rsp; + + rsp = (struct msg_rsp *) + otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp)); + if (!rsp) + return -ENOMEM; + rsp->hdr.id = id; + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; + rsp->hdr.rc = MBOX_MSG_INVALID; + rsp->hdr.pcifunc = pcifunc; + return 0; +} +EXPORT_SYMBOL(otx2_reply_invalid_msg); + +bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + bool ret; + + spin_lock(&mdev->mbox_lock); + ret = mdev->num_msgs != 0; + spin_unlock(&mdev->mbox_lock); + + return ret; +} +EXPORT_SYMBOL(otx2_mbox_nonempty); + +const char *otx2_mbox_id2name(u16 id) +{ + switch (id) { +#define M(_name, _id, _1, _2) case _id: return # _name; + MBOX_MESSAGES +#undef M + default: 
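 /* Any ID without an M() entry in the MBOX_MESSAGES list above
  * (i.e. an unknown or out-of-range message ID) lands here.
  */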
+ return "INVALID ID"; + } +} +EXPORT_SYMBOL(otx2_mbox_id2name); + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h new file mode 100644 index 000000000000..a15a59c9a239 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -0,0 +1,525 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MBOX_H +#define MBOX_H + +#include <linux/etherdevice.h> +#include <linux/sizes.h> + +#include "rvu_struct.h" +#include "common.h" + +#define MBOX_SIZE SZ_64K + +/* AF/PF: PF initiated, PF/VF VF initiated */ +#define MBOX_DOWN_RX_START 0 +#define MBOX_DOWN_RX_SIZE (46 * SZ_1K) +#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE) +#define MBOX_DOWN_TX_SIZE (16 * SZ_1K) +/* AF/PF: AF initiated, PF/VF PF initiated */ +#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE) +#define MBOX_UP_RX_SIZE SZ_1K +#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE) +#define MBOX_UP_TX_SIZE SZ_1K + +#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE +# error "incorrect mailbox area sizes" +#endif + +#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull)) + +#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */ + +#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */ + +/* Mailbox directions */ +#define MBOX_DIR_AFPF 0 /* AF replies to PF */ +#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */ +#define MBOX_DIR_PFVF 2 /* PF replies to VF */ +#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */ +#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */ +#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */ +#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */ +#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */ + +struct otx2_mbox_dev { + void *mbase; /* This dev's mbox region */ + spinlock_t mbox_lock; + u16 msg_size; /* Total msg size to be sent */ + u16 rsp_size; /* Total rsp size to be sure the reply is ok */ + u16 num_msgs; /* No of msgs sent or waiting for response */ + u16 msgs_acked; /* No of msgs for which response is received */ +}; + +struct otx2_mbox { + struct pci_dev *pdev; + void *hwbase; /* Mbox region advertised by HW */ + void *reg_base;/* CSR base for this dev */ + u64 trigger; /* Trigger mbox notification */ + u16 tr_shift; /* Mbox trigger shift */ + u64 rx_start; /* Offset of Rx region in mbox memory */ + u64 tx_start; /* Offset of Tx region in mbox memory */ + u16 rx_size; /* Size of Rx region */ + u16 tx_size; /* Size of Tx region */ + u16 ndevs; /* The number of peers */ + struct otx2_mbox_dev *dev; +}; + +/* Header which preceeds all mbox messages */ +struct mbox_hdr { + u16 num_msgs; /* No of msgs embedded */ +}; + +/* Header which preceeds every msg and is also part of it */ +struct mbox_msghdr { + u16 pcifunc; /* Who's sending this msg */ + u16 id; /* Mbox message ID */ +#define OTX2_MBOX_REQ_SIG (0xdead) +#define OTX2_MBOX_RSP_SIG (0xbeef) + u16 sig; /* Signature, for validating corrupted msgs */ +#define OTX2_MBOX_VERSION (0x0001) + u16 ver; /* Version of msg's structure for this ID */ + u16 next_msgoff; /* Offset of next msg within mailbox region */ + int rc; /* 
Msg process'ed response code */ +}; + +void otx2_mbox_reset(struct otx2_mbox *mbox, int devid); +void otx2_mbox_destroy(struct otx2_mbox *mbox); +int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase, + struct pci_dev *pdev, void __force *reg_base, + int direction, int ndevs); +void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid); +int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid); +int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid); +struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, + int size, int size_rsp); +struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, + struct mbox_msghdr *msg); +int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, + u16 pcifunc, u16 id); +bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid); +const char *otx2_mbox_id2name(u16 id); +static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox, + int devid, int size) +{ + return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0); +} + +/* Mailbox message types */ +#define MBOX_MSG_MASK 0xFFFF +#define MBOX_MSG_INVALID 0xFFFE +#define MBOX_MSG_MAX 0xFFFF + +#define MBOX_MESSAGES \ +/* Generic mbox IDs (range 0x000 - 0x1FF) */ \ +M(READY, 0x001, msg_req, ready_msg_rsp) \ +M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \ +M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \ +M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \ +/* CGX mbox IDs (range 0x200 - 0x3FF) */ \ +M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \ +M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \ +M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \ +M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \ + cgx_mac_addr_set_or_get) \ +M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \ + cgx_mac_addr_set_or_get) \ +M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \ +M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \ +M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \ +M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \ +M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \ +M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \ +M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \ +/* NPA mbox IDs (range 0x400 - 0x5FF) */ \ +M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \ +M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \ +M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \ +M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \ +/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \ +/* TIM mbox IDs (range 0x800 - 0x9FF) */ \ +/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \ +/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ +/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ +M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \ +M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \ +M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \ +M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \ +M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \ +M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \ +M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \ +M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp) \ +M(NIX_VTAG_CFG, 0x8008, nix_vtag_config, msg_rsp) \ +M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, msg_rsp) \ +M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, msg_rsp) \ +M(NIX_SET_RX_MODE, 0x800b, nix_rx_mode, msg_rsp) + +/* Messages initiated by AF (range 0xC00 - 0xDFF) */ +#define MBOX_UP_CGX_MESSAGES \ +M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp) + +enum { +#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id, +MBOX_MESSAGES 
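/* A note on the X-macro pattern used here: M() is temporarily defined to
 * emit one enumerator per mailbox message, the MBOX_MESSAGES and (just
 * below) MBOX_UP_CGX_MESSAGES lists are expanded with it, and M() is then
 * undefined.  For example, the first generic entry
 *     M(READY, 0x001, msg_req, ready_msg_rsp)
 * expands to
 *     MBOX_MSG_READY = 0x001,
 * The same lists are reused with other M() definitions, e.g.
 * otx2_mbox_id2name() in mbox.c stringifies _name in a switch, so each
 * message's ID, name and request/response types live in a single table.
 */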
+MBOX_UP_CGX_MESSAGES +#undef M +}; + +/* Mailbox message formats */ + +#define RVU_DEFAULT_PF_FUNC 0xFFFF + +/* Generic request msg used for those mbox messages which + * don't send any data in the request. + */ +struct msg_req { + struct mbox_msghdr hdr; +}; + +/* Generic rsponse msg used a ack or response for those mbox + * messages which doesn't have a specific rsp msg format. + */ +struct msg_rsp { + struct mbox_msghdr hdr; +}; + +struct ready_msg_rsp { + struct mbox_msghdr hdr; + u16 sclk_feq; /* SCLK frequency */ +}; + +/* Structure for requesting resource provisioning. + * 'modify' flag to be used when either requesting more + * or to detach partial of a cetain resource type. + * Rest of the fields specify how many of what type to + * be attached. + */ +struct rsrc_attach { + struct mbox_msghdr hdr; + u8 modify:1; + u8 npalf:1; + u8 nixlf:1; + u16 sso; + u16 ssow; + u16 timlfs; + u16 cptlfs; +}; + +/* Structure for relinquishing resources. + * 'partial' flag to be used when relinquishing all resources + * but only of a certain type. If not set, all resources of all + * types provisioned to the RVU function will be detached. + */ +struct rsrc_detach { + struct mbox_msghdr hdr; + u8 partial:1; + u8 npalf:1; + u8 nixlf:1; + u8 sso:1; + u8 ssow:1; + u8 timlfs:1; + u8 cptlfs:1; +}; + +#define MSIX_VECTOR_INVALID 0xFFFF +#define MAX_RVU_BLKLF_CNT 256 + +struct msix_offset_rsp { + struct mbox_msghdr hdr; + u16 npa_msixoff; + u16 nix_msixoff; + u8 sso; + u8 ssow; + u8 timlfs; + u8 cptlfs; + u16 sso_msixoff[MAX_RVU_BLKLF_CNT]; + u16 ssow_msixoff[MAX_RVU_BLKLF_CNT]; + u16 timlf_msixoff[MAX_RVU_BLKLF_CNT]; + u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT]; +}; + +/* CGX mbox message formats */ + +struct cgx_stats_rsp { + struct mbox_msghdr hdr; +#define CGX_RX_STATS_COUNT 13 +#define CGX_TX_STATS_COUNT 18 + u64 rx_stats[CGX_RX_STATS_COUNT]; + u64 tx_stats[CGX_TX_STATS_COUNT]; +}; + +/* Structure for requesting the operation for + * setting/getting mac address in the CGX interface + */ +struct cgx_mac_addr_set_or_get { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; +}; + +struct cgx_link_user_info { + uint64_t link_up:1; + uint64_t full_duplex:1; + uint64_t lmac_type_id:4; + uint64_t speed:20; /* speed in Mbps */ +#define LMACTYPE_STR_LEN 16 + char lmac_type[LMACTYPE_STR_LEN]; +}; + +struct cgx_link_info_msg { + struct mbox_msghdr hdr; + struct cgx_link_user_info link_info; +}; + +/* NPA mbox message formats */ + +/* NPA mailbox error codes + * Range 301 - 400. + */ +enum npa_af_status { + NPA_AF_ERR_PARAM = -301, + NPA_AF_ERR_AQ_FULL = -302, + NPA_AF_ERR_AQ_ENQUEUE = -303, + NPA_AF_ERR_AF_LF_INVALID = -304, + NPA_AF_ERR_AF_LF_ALLOC = -305, + NPA_AF_ERR_LF_RESET = -306, +}; + +/* For NPA LF context alloc and init */ +struct npa_lf_alloc_req { + struct mbox_msghdr hdr; + int node; + int aura_sz; /* No of auras */ + u32 nr_pools; /* No of pools */ +}; + +struct npa_lf_alloc_rsp { + struct mbox_msghdr hdr; + u32 stack_pg_ptrs; /* No of ptrs per stack page */ + u32 stack_pg_bytes; /* Size of stack page */ + u16 qints; /* NPA_AF_CONST::QINTS */ +}; + +/* NPA AQ enqueue msg */ +struct npa_aq_enq_req { + struct mbox_msghdr hdr; + u32 aura_id; + u8 ctype; + u8 op; + union { + /* Valid when op == WRITE/INIT and ctype == AURA. + * LF fills the pool_id in aura.pool_addr. AF will translate + * the pool_id to pool context pointer. 
+ */ + struct npa_aura_s aura; + /* Valid when op == WRITE/INIT and ctype == POOL */ + struct npa_pool_s pool; + }; + /* Mask data when op == WRITE (1=write, 0=don't write) */ + union { + /* Valid when op == WRITE and ctype == AURA */ + struct npa_aura_s aura_mask; + /* Valid when op == WRITE and ctype == POOL */ + struct npa_pool_s pool_mask; + }; +}; + +struct npa_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + /* Valid when op == READ and ctype == AURA */ + struct npa_aura_s aura; + /* Valid when op == READ and ctype == POOL */ + struct npa_pool_s pool; + }; +}; + +/* Disable all contexts of type 'ctype' */ +struct hwctx_disable_req { + struct mbox_msghdr hdr; + u8 ctype; +}; + +/* NIX mailbox error codes + * Range 401 - 500. + */ +enum nix_af_status { + NIX_AF_ERR_PARAM = -401, + NIX_AF_ERR_AQ_FULL = -402, + NIX_AF_ERR_AQ_ENQUEUE = -403, + NIX_AF_ERR_AF_LF_INVALID = -404, + NIX_AF_ERR_AF_LF_ALLOC = -405, + NIX_AF_ERR_TLX_ALLOC_FAIL = -406, + NIX_AF_ERR_TLX_INVALID = -407, + NIX_AF_ERR_RSS_SIZE_INVALID = -408, + NIX_AF_ERR_RSS_GRPS_INVALID = -409, + NIX_AF_ERR_FRS_INVALID = -410, + NIX_AF_ERR_RX_LINK_INVALID = -411, + NIX_AF_INVAL_TXSCHQ_CFG = -412, + NIX_AF_SMQ_FLUSH_FAILED = -413, + NIX_AF_ERR_LF_RESET = -414, +}; + +/* For NIX LF context alloc and init */ +struct nix_lf_alloc_req { + struct mbox_msghdr hdr; + int node; + u32 rq_cnt; /* No of receive queues */ + u32 sq_cnt; /* No of send queues */ + u32 cq_cnt; /* No of completion queues */ + u8 xqe_sz; + u16 rss_sz; + u8 rss_grps; + u16 npa_func; + u16 sso_func; + u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */ +}; + +struct nix_lf_alloc_rsp { + struct mbox_msghdr hdr; + u16 sqb_size; + u16 rx_chan_base; + u16 tx_chan_base; + u8 rx_chan_cnt; /* total number of RX channels */ + u8 tx_chan_cnt; /* total number of TX channels */ + u8 lso_tsov4_idx; + u8 lso_tsov6_idx; + u8 mac_addr[ETH_ALEN]; +}; + +/* NIX AQ enqueue msg */ +struct nix_aq_enq_req { + struct mbox_msghdr hdr; + u32 qidx; + u8 ctype; + u8 op; + union { + struct nix_rq_ctx_s rq; + struct nix_sq_ctx_s sq; + struct nix_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + }; + union { + struct nix_rq_ctx_s rq_mask; + struct nix_sq_ctx_s sq_mask; + struct nix_cq_ctx_s cq_mask; + struct nix_rsse_s rss_mask; + struct nix_rx_mce_s mce_mask; + }; +}; + +struct nix_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + struct nix_rq_ctx_s rq; + struct nix_sq_ctx_s sq; + struct nix_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + }; +}; + +/* Tx scheduler/shaper mailbox messages */ + +#define MAX_TXSCHQ_PER_FUNC 128 + +struct nix_txsch_alloc_req { + struct mbox_msghdr hdr; + /* Scheduler queue count request at each level */ + u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */ + u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */ +}; + +struct nix_txsch_alloc_rsp { + struct mbox_msghdr hdr; + /* Scheduler queue count allocated at each level */ + u16 schq_contig[NIX_TXSCH_LVL_CNT]; + u16 schq[NIX_TXSCH_LVL_CNT]; + /* Scheduler queue list allocated at each level */ + u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; +}; + +struct nix_txsch_free_req { + struct mbox_msghdr hdr; +#define TXSCHQ_FREE_ALL BIT_ULL(0) + u16 flags; + /* Scheduler queue level to be freed */ + u16 schq_lvl; + /* List of scheduler queues to be freed */ + u16 schq; +}; + +struct nix_txschq_config { + struct mbox_msghdr hdr; + u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */ +#define TXSCHQ_IDX_SHIFT 16 
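/* A brief usage sketch: reg[] presumably carries NIX_AF_* scheduler CSR
 * offsets and regval[] the values to program, with num_regs giving how
 * many entries are valid.  TXSCHQ_IDX(), defined just below, recovers a
 * 10-bit scheduler queue index from bits [25:16] of such an offset, e.g.
 *     u16 schq = TXSCHQ_IDX(reg[i], TXSCHQ_IDX_SHIFT);
 * letting the AF check that the queue being configured was actually
 * allocated to the requesting PF/VF before writing the CSR.
 */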
+#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1) +#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK) + u8 num_regs; +#define MAX_REGS_PER_MBOX_MSG 20 + u64 reg[MAX_REGS_PER_MBOX_MSG]; + u64 regval[MAX_REGS_PER_MBOX_MSG]; +}; + +struct nix_vtag_config { + struct mbox_msghdr hdr; + u8 vtag_size; + /* cfg_type is '0' for tx vlan cfg + * cfg_type is '1' for rx vlan cfg + */ + u8 cfg_type; + union { + /* valid when cfg_type is '0' */ + struct { + /* tx vlan0 tag(C-VLAN) */ + u64 vlan0; + /* tx vlan1 tag(S-VLAN) */ + u64 vlan1; + /* insert tx vlan tag */ + u8 insert_vlan :1; + /* insert tx double vlan tag */ + u8 double_vlan :1; + } tx; + + /* valid when cfg_type is '1' */ + struct { + /* rx vtag type index */ + u8 vtag_type; + /* rx vtag strip */ + u8 strip_vtag :1; + /* rx vtag capture */ + u8 capture_vtag :1; + } rx; + }; +}; + +struct nix_rss_flowkey_cfg { + struct mbox_msghdr hdr; + int mcam_index; /* MCAM entry index to modify */ + u32 flowkey_cfg; /* Flowkey types selected */ + u8 group; /* RSS context or group */ +}; + +struct nix_set_mac_addr { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */ +}; + +struct nix_rx_mode { + struct mbox_msghdr hdr; +#define NIX_RX_MODE_UCAST BIT(0) +#define NIX_RX_MODE_PROMISC BIT(1) +#define NIX_RX_MODE_ALLMULTI BIT(2) + u16 mode; +}; + +#endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h new file mode 100644 index 000000000000..f98b0113def3 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef NPC_H +#define NPC_H + +enum NPC_LID_E { + NPC_LID_LA = 0, + NPC_LID_LB, + NPC_LID_LC, + NPC_LID_LD, + NPC_LID_LE, + NPC_LID_LF, + NPC_LID_LG, + NPC_LID_LH, +}; + +#define NPC_LT_NA 0 + +enum npc_kpu_la_ltype { + NPC_LT_LA_8023 = 1, + NPC_LT_LA_ETHER, +}; + +enum npc_kpu_lb_ltype { + NPC_LT_LB_ETAG = 1, + NPC_LT_LB_CTAG, + NPC_LT_LB_STAG, + NPC_LT_LB_BTAG, + NPC_LT_LB_QINQ, + NPC_LT_LB_ITAG, +}; + +enum npc_kpu_lc_ltype { + NPC_LT_LC_IP = 1, + NPC_LT_LC_IP6, + NPC_LT_LC_ARP, + NPC_LT_LC_RARP, + NPC_LT_LC_MPLS, + NPC_LT_LC_NSH, + NPC_LT_LC_PTP, + NPC_LT_LC_FCOE, +}; + +/* Don't modify Ltypes upto SCTP, otherwise it will + * effect flow tag calculation and thus RSS. 
+ */ +enum npc_kpu_ld_ltype { + NPC_LT_LD_TCP = 1, + NPC_LT_LD_UDP, + NPC_LT_LD_ICMP, + NPC_LT_LD_SCTP, + NPC_LT_LD_IGMP, + NPC_LT_LD_ICMP6, + NPC_LT_LD_ESP, + NPC_LT_LD_AH, + NPC_LT_LD_GRE, + NPC_LT_LD_GRE_MPLS, + NPC_LT_LD_GRE_NSH, + NPC_LT_LD_TU_MPLS, +}; + +enum npc_kpu_le_ltype { + NPC_LT_LE_TU_ETHER = 1, + NPC_LT_LE_TU_PPP, + NPC_LT_LE_TU_MPLS_IN_NSH, + NPC_LT_LE_TU_3RD_NSH, +}; + +enum npc_kpu_lf_ltype { + NPC_LT_LF_TU_IP = 1, + NPC_LT_LF_TU_IP6, + NPC_LT_LF_TU_ARP, + NPC_LT_LF_TU_MPLS_IP, + NPC_LT_LF_TU_MPLS_IP6, + NPC_LT_LF_TU_MPLS_ETHER, +}; + +enum npc_kpu_lg_ltype { + NPC_LT_LG_TU_TCP = 1, + NPC_LT_LG_TU_UDP, + NPC_LT_LG_TU_SCTP, + NPC_LT_LG_TU_ICMP, + NPC_LT_LG_TU_IGMP, + NPC_LT_LG_TU_ICMP6, + NPC_LT_LG_TU_ESP, + NPC_LT_LG_TU_AH, +}; + +enum npc_kpu_lh_ltype { + NPC_LT_LH_TCP_DATA = 1, + NPC_LT_LH_HTTP_DATA, + NPC_LT_LH_HTTPS_DATA, + NPC_LT_LH_PPTP_DATA, + NPC_LT_LH_UDP_DATA, +}; + +struct npc_kpu_profile_cam { + u8 state; + u8 state_mask; + u16 dp0; + u16 dp0_mask; + u16 dp1; + u16 dp1_mask; + u16 dp2; + u16 dp2_mask; +}; + +struct npc_kpu_profile_action { + u8 errlev; + u8 errcode; + u8 dp0_offset; + u8 dp1_offset; + u8 dp2_offset; + u8 bypass_count; + u8 parse_done; + u8 next_state; + u8 ptr_advance; + u8 cap_ena; + u8 lid; + u8 ltype; + u8 flags; + u8 offset; + u8 mask; + u8 right; + u8 shift; +}; + +struct npc_kpu_profile { + int cam_entries; + int action_entries; + struct npc_kpu_profile_cam *cam; + struct npc_kpu_profile_action *action; +}; + +/* NPC KPU register formats */ +struct npc_kpu_cam { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_63_56 : 8; + u64 state : 8; + u64 dp2_data : 16; + u64 dp1_data : 16; + u64 dp0_data : 16; +#else + u64 dp0_data : 16; + u64 dp1_data : 16; + u64 dp2_data : 16; + u64 state : 8; + u64 rsvd_63_56 : 8; +#endif +}; + +struct npc_kpu_action0 { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_63_57 : 7; + u64 byp_count : 3; + u64 capture_ena : 1; + u64 parse_done : 1; + u64 next_state : 8; + u64 rsvd_43 : 1; + u64 capture_lid : 3; + u64 capture_ltype : 4; + u64 capture_flags : 8; + u64 ptr_advance : 8; + u64 var_len_offset : 8; + u64 var_len_mask : 8; + u64 var_len_right : 1; + u64 var_len_shift : 3; +#else + u64 var_len_shift : 3; + u64 var_len_right : 1; + u64 var_len_mask : 8; + u64 var_len_offset : 8; + u64 ptr_advance : 8; + u64 capture_flags : 8; + u64 capture_ltype : 4; + u64 capture_lid : 3; + u64 rsvd_43 : 1; + u64 next_state : 8; + u64 parse_done : 1; + u64 capture_ena : 1; + u64 byp_count : 3; + u64 rsvd_63_57 : 7; +#endif +}; + +struct npc_kpu_action1 { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_63_36 : 28; + u64 errlev : 4; + u64 errcode : 8; + u64 dp2_offset : 8; + u64 dp1_offset : 8; + u64 dp0_offset : 8; +#else + u64 dp0_offset : 8; + u64 dp1_offset : 8; + u64 dp2_offset : 8; + u64 errcode : 8; + u64 errlev : 4; + u64 rsvd_63_36 : 28; +#endif +}; + +struct npc_kpu_pkind_cpi_def { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 ena : 1; + u64 rsvd_62_59 : 4; + u64 lid : 3; + u64 ltype_match : 4; + u64 ltype_mask : 4; + u64 flags_match : 8; + u64 flags_mask : 8; + u64 add_offset : 8; + u64 add_mask : 8; + u64 rsvd_15 : 1; + u64 add_shift : 3; + u64 rsvd_11_10 : 2; + u64 cpi_base : 10; +#else + u64 cpi_base : 10; + u64 rsvd_11_10 : 2; + u64 add_shift : 3; + u64 rsvd_15 : 1; + u64 add_mask : 8; + u64 add_offset : 8; + u64 flags_mask : 8; + u64 flags_match : 8; + u64 ltype_mask : 4; + u64 ltype_match : 4; + u64 lid : 3; + u64 rsvd_62_59 : 4; + u64 ena : 1; +#endif +}; + +struct nix_rx_action { +#if defined(__BIG_ENDIAN_BITFIELD) + 
u64 rsvd_63_61 :3; + u64 flow_key_alg :5; + u64 match_id :16; + u64 index :20; + u64 pf_func :16; + u64 op :4; +#else + u64 op :4; + u64 pf_func :16; + u64 index :20; + u64 match_id :16; + u64 flow_key_alg :5; + u64 rsvd_63_61 :3; +#endif +}; + +#endif /* NPC_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h new file mode 100644 index 000000000000..b2ce957605bb --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -0,0 +1,5709 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef NPC_PROFILE_H +#define NPC_PROFILE_H + +#define NPC_ETYPE_IP 0x0800 +#define NPC_ETYPE_IP6 0x86dd +#define NPC_ETYPE_ARP 0x0806 +#define NPC_ETYPE_RARP 0x8035 +#define NPC_ETYPE_MPLSU 0x8847 +#define NPC_ETYPE_MPLSM 0x8848 +#define NPC_ETYPE_ETAG 0x893f +#define NPC_ETYPE_CTAG 0x8100 +#define NPC_ETYPE_SBTAG 0x88a8 +#define NPC_ETYPE_ITAG 0x88e7 +#define NPC_ETYPE_PTP 0x88f7 +#define NPC_ETYPE_FCOE 0x8906 +#define NPC_ETYPE_QINQ 0x9100 +#define NPC_ETYPE_TRANS_ETH_BR 0x6558 +#define NPC_ETYPE_PPP 0x880b +#define NPC_ETYPE_NSH 0x894f + +#define NPC_IPNH_HOP 0 +#define NPC_IPNH_ICMP 1 +#define NPC_IPNH_IGMP 2 +#define NPC_IPNH_IP 4 +#define NPC_IPNH_TCP 6 +#define NPC_IPNH_UDP 17 +#define NPC_IPNH_IP6 41 +#define NPC_IPNH_ROUT 43 +#define NPC_IPNH_FRAG 44 +#define NPC_IPNH_GRE 47 +#define NPC_IPNH_ESP 50 +#define NPC_IPNH_AH 51 +#define NPC_IPNH_ICMP6 58 +#define NPC_IPNH_NONH 59 +#define NPC_IPNH_DEST 60 +#define NPC_IPNH_SCTP 132 +#define NPC_IPNH_MPLS 137 + +#define NPC_UDP_PORT_GTPC 2123 +#define NPC_UDP_PORT_GTPU 2152 +#define NPC_UDP_PORT_VXLAN 4789 +#define NPC_UDP_PORT_VXLANGPE 4790 +#define NPC_UDP_PORT_GENEVE 6081 + +#define NPC_VXLANGPE_NP_IP 0x1 +#define NPC_VXLANGPE_NP_IP6 0x2 +#define NPC_VXLANGPE_NP_ETH 0x3 +#define NPC_VXLANGPE_NP_NSH 0x4 +#define NPC_VXLANGPE_NP_MPLS 0x5 +#define NPC_VXLANGPE_NP_GBP 0x6 +#define NPC_VXLANGPE_NP_VBNG 0x7 + +#define NPC_NSH_NP_IP 0x1 +#define NPC_NSH_NP_IP6 0x2 +#define NPC_NSH_NP_ETH 0x3 +#define NPC_NSH_NP_NSH 0x4 +#define NPC_NSH_NP_MPLS 0x5 + +#define NPC_TCP_PORT_HTTP 80 +#define NPC_TCP_PORT_HTTPS 443 +#define NPC_TCP_PORT_PPTP 1723 + +#define NPC_MPLS_S 0x0100 + +#define NPC_IP_VER_4 0x4000 +#define NPC_IP_VER_6 0x6000 +#define NPC_IP_VER_MASK 0xf000 +#define NPC_IP_HDR_LEN_5 0x0500 +#define NPC_IP_HDR_LEN_MASK 0x0f00 + +#define NPC_GRE_F_CSUM (0x1 << 15) +#define NPC_GRE_F_ROUTE (0x1 << 14) +#define NPC_GRE_F_KEY (0x1 << 13) +#define NPC_GRE_F_SEQ (0x1 << 12) +#define NPC_GRE_F_ACK (0x1 << 7) +#define NPC_GRE_FLAG_MASK (NPC_GRE_F_CSUM | NPC_GRE_F_ROUTE | \ + NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK) +#define NPC_GRE_VER_MASK 0x0003 +#define NPC_GRE_VER_1 0x0001 + +#define NPC_VXLAN_I 0x0800 + +#define NPC_VXLANGPE_VER (0x3 << 12) +#define NPC_VXLANGPE_I (0x1 << 11) +#define NPC_VXLANGPE_P (0x1 << 10) +#define NPC_VXLANGPE_B (0x1 << 9) +#define NPC_VXLANGPE_NP_MASK 0x00ff + +#define NPC_NSH_NP_MASK 0x00ff + +#define NPC_GENEVE_F_OAM (0x1 << 7) +#define NPC_GENEVE_F_CRI_OPT (0x1 << 6) + +#define NPC_GTP_PT_GTP (0x1 << 12) +#define NPC_GTP_PT_MASK (0x1 << 12) +#define NPC_GTP_VER1 (0x1 << 13) +#define NPC_GTP_VER_MASK (0x7 << 13) +#define NPC_GTP_MT_G_PDU 0xff 
+#define NPC_GTP_MT_MASK 0xff + +#define NPC_TCP_DATA_OFFSET_5 0x5000 +#define NPC_TCP_DATA_OFFSET_MASK 0xf000 + +enum npc_kpu_parser_state { + NPC_S_NA = 0, + NPC_S_KPU1_ETHER, + NPC_S_KPU1_PKI, + NPC_S_KPU2_CTAG, + NPC_S_KPU2_SBTAG, + NPC_S_KPU2_QINQ, + NPC_S_KPU2_ETAG, + NPC_S_KPU2_ITAG, + NPC_S_KPU3_CTAG, + NPC_S_KPU3_STAG, + NPC_S_KPU3_QINQ, + NPC_S_KPU3_ITAG, + NPC_S_KPU4_MPLS, + NPC_S_KPU4_NSH, + NPC_S_KPU5_IP, + NPC_S_KPU5_IP6, + NPC_S_KPU5_ARP, + NPC_S_KPU5_RARP, + NPC_S_KPU5_PTP, + NPC_S_KPU5_FCOE, + NPC_S_KPU5_MPLS, + NPC_S_KPU5_MPLS_PL, + NPC_S_KPU5_NSH, + NPC_S_KPU6_IP6_EXT, + NPC_S_KPU7_IP6_EXT, + NPC_S_KPU8_TCP, + NPC_S_KPU8_UDP, + NPC_S_KPU8_SCTP, + NPC_S_KPU8_ICMP, + NPC_S_KPU8_IGMP, + NPC_S_KPU8_ICMP6, + NPC_S_KPU8_GRE, + NPC_S_KPU8_ESP, + NPC_S_KPU8_AH, + NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, + NPC_S_KPU9_TU_MPLS, + NPC_S_KPU9_TU_NSH, + NPC_S_KPU10_TU_MPLS_PL, + NPC_S_KPU10_TU_MPLS, + NPC_S_KPU10_TU_NSH, + NPC_S_KPU11_TU_ETHER, + NPC_S_KPU11_TU_PPP, + NPC_S_KPU11_TU_MPLS_IN_NSH, + NPC_S_KPU11_TU_3RD_NSH, + NPC_S_KPU12_TU_IP, + NPC_S_KPU12_TU_IP6, + NPC_S_KPU12_TU_ARP, + NPC_S_KPU13_TU_IP6_EXT, + NPC_S_KPU14_TU_IP6_EXT, + NPC_S_KPU15_TU_TCP, + NPC_S_KPU15_TU_UDP, + NPC_S_KPU15_TU_SCTP, + NPC_S_KPU15_TU_ICMP, + NPC_S_KPU15_TU_IGMP, + NPC_S_KPU15_TU_ICMP6, + NPC_S_KPU15_TU_ESP, + NPC_S_KPU15_TU_AH, + NPC_S_KPU16_HTTP_DATA, + NPC_S_KPU16_HTTPS_DATA, + NPC_S_KPU16_PPTP_DATA, + NPC_S_KPU16_TCP_DATA, + NPC_S_KPU16_UDP_DATA, + NPC_S_LAST /* has to be the last item */ +}; + +enum npc_kpu_parser_flag { + NPC_F_NA = 0, + NPC_F_PKI, + NPC_F_PKI_VLAN, + NPC_F_PKI_ETAG, + NPC_F_PKI_ITAG, + NPC_F_PKI_MPLS, + NPC_F_PKI_NSH, + NPC_F_ETYPE_UNK, + NPC_F_ETHER_VLAN, + NPC_F_ETHER_ETAG, + NPC_F_ETHER_ITAG, + NPC_F_ETHER_MPLS, + NPC_F_ETHER_NSH, + NPC_F_STAG_CTAG, + NPC_F_STAG_CTAG_UNK, + NPC_F_STAG_STAG_CTAG, + NPC_F_STAG_STAG_STAG, + NPC_F_QINQ_CTAG, + NPC_F_QINQ_CTAG_UNK, + NPC_F_QINQ_QINQ_CTAG, + NPC_F_QINQ_QINQ_QINQ, + NPC_F_BTAG_ITAG, + NPC_F_BTAG_ITAG_STAG, + NPC_F_BTAG_ITAG_CTAG, + NPC_F_BTAG_ITAG_UNK, + NPC_F_ETAG_CTAG, + NPC_F_ETAG_BTAG_ITAG, + NPC_F_ETAG_STAG, + NPC_F_ETAG_QINQ, + NPC_F_ETAG_ITAG, + NPC_F_ETAG_ITAG_STAG, + NPC_F_ETAG_ITAG_CTAG, + NPC_F_ETAG_ITAG_UNK, + NPC_F_ITAG_STAG_CTAG, + NPC_F_ITAG_STAG, + NPC_F_ITAG_CTAG, + NPC_F_MPLS_4_LABELS, + NPC_F_MPLS_3_LABELS, + NPC_F_MPLS_2_LABELS, + NPC_F_IP_HAS_OPTIONS, + NPC_F_IP_IP_IN_IP, + NPC_F_IP_6TO4, + NPC_F_IP_MPLS_IN_IP, + NPC_F_IP_UNK_PROTO, + NPC_F_IP_IP_IN_IP_HAS_OPTIONS, + NPC_F_IP_6TO4_HAS_OPTIONS, + NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS, + NPC_F_IP_UNK_PROTO_HAS_OPTIONS, + NPC_F_IP6_HAS_EXT, + NPC_F_IP6_TUN_IP6, + NPC_F_IP6_MPLS_IN_IP, + NPC_F_TCP_HAS_OPTIONS, + NPC_F_TCP_HTTP, + NPC_F_TCP_HTTPS, + NPC_F_TCP_PPTP, + NPC_F_TCP_UNK_PORT, + NPC_F_TCP_HTTP_HAS_OPTIONS, + NPC_F_TCP_HTTPS_HAS_OPTIONS, + NPC_F_TCP_PPTP_HAS_OPTIONS, + NPC_F_TCP_UNK_PORT_HAS_OPTIONS, + NPC_F_UDP_VXLAN, + NPC_F_UDP_VXLAN_NOVNI, + NPC_F_UDP_VXLAN_NOVNI_NSH, + NPC_F_UDP_VXLANGPE, + NPC_F_UDP_VXLANGPE_NSH, + NPC_F_UDP_VXLANGPE_MPLS, + NPC_F_UDP_VXLANGPE_NOVNI, + NPC_F_UDP_VXLANGPE_NOVNI_NSH, + NPC_F_UDP_VXLANGPE_NOVNI_MPLS, + NPC_F_UDP_VXLANGPE_UNK, + NPC_F_UDP_VXLANGPE_NONP, + NPC_F_UDP_GTP_GTPC, + NPC_F_UDP_GTP_GTPU_G_PDU, + NPC_F_UDP_GTP_GTPU_UNK, + NPC_F_UDP_UNK_PORT, + NPC_F_UDP_GENEVE, + NPC_F_UDP_GENEVE_OAM, + NPC_F_UDP_GENEVE_CRI_OPT, + NPC_F_UDP_GENEVE_OAM_CRI_OPT, + NPC_F_GRE_NVGRE, + NPC_F_GRE_HAS_SRE, + NPC_F_GRE_HAS_CSUM, + NPC_F_GRE_HAS_KEY, + NPC_F_GRE_HAS_SEQ, + NPC_F_GRE_HAS_CSUM_KEY, + NPC_F_GRE_HAS_CSUM_SEQ, + NPC_F_GRE_HAS_KEY_SEQ, + 
NPC_F_GRE_HAS_CSUM_KEY_SEQ, + NPC_F_GRE_HAS_ROUTE, + NPC_F_GRE_UNK_PROTO, + NPC_F_GRE_VER1, + NPC_F_GRE_VER1_HAS_SEQ, + NPC_F_GRE_VER1_HAS_ACK, + NPC_F_GRE_VER1_HAS_SEQ_ACK, + NPC_F_GRE_VER1_UNK_PROTO, + NPC_F_TU_ETHER_UNK, + NPC_F_TU_ETHER_CTAG, + NPC_F_TU_ETHER_CTAG_UNK, + NPC_F_TU_ETHER_STAG_CTAG, + NPC_F_TU_ETHER_STAG_CTAG_UNK, + NPC_F_TU_ETHER_STAG, + NPC_F_TU_ETHER_STAG_UNK, + NPC_F_TU_ETHER_QINQ_CTAG, + NPC_F_TU_ETHER_QINQ_CTAG_UNK, + NPC_F_TU_ETHER_QINQ, + NPC_F_TU_ETHER_QINQ_UNK, + NPC_F_LAST /* has to be the last item */ +}; + +enum npc_kpu_err_code { + NPC_EC_NOERR = 0, /* has to be zero */ + NPC_EC_UNK, + NPC_EC_L2_K1, + NPC_EC_L2_K2, + NPC_EC_L2_K3, + NPC_EC_L2_K3_ETYPE_UNK, + NPC_EC_L2_MPLS_2MANY, + NPC_EC_L2_K4, + NPC_EC_IP_VER, + NPC_EC_IP6_VER, + NPC_EC_VXLAN, + NPC_EC_NVGRE, + NPC_EC_GRE, + NPC_EC_GRE_VER1, + NPC_EC_L4, + NPC_EC_LAST /* has to be the last item */ +}; + +enum NPC_ERRLEV_E { + NPC_ERRLEV_RE = 0, + NPC_ERRLEV_LA = 1, + NPC_ERRLEV_LB = 2, + NPC_ERRLEV_LC = 3, + NPC_ERRLEV_LD = 4, + NPC_ERRLEV_LE = 5, + NPC_ERRLEV_LF = 6, + NPC_ERRLEV_LG = 7, + NPC_ERRLEV_LH = 8, + NPC_ERRLEV_R9 = 9, + NPC_ERRLEV_R10 = 10, + NPC_ERRLEV_R11 = 11, + NPC_ERRLEV_R12 = 12, + NPC_ERRLEV_R13 = 13, + NPC_ERRLEV_R14 = 14, + NPC_ERRLEV_NIX = 15, + NPC_ERRLEV_ENUM_LAST = 16, +}; + +static struct npc_kpu_profile_action ikpu_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 
16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, 
NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, + 0, 0, NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 1, 0xff, + 0, 0, + }, +}; + +static struct npc_kpu_profile_cam kpu1_cam_entries[] = { + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_PTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_FCOE, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ETAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, 
NPC_ETYPE_MPLSM, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, 0x0000, 0xfc00, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, 0x0400, 0xfe00, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_PTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_FCOE, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_CTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_SBTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_QINQ, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ETAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0010, 0x0010, 0x0000, 0xffff, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSM, 0xffff, + 0x0010, 0x0010, 0x0000, 0xffff, + }, + { + NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU1_PKI, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu2_cam_entries[] = { + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, 
NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_SBTAG, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_RARP, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_PTP, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_FCOE, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_MPLSU, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_MPLSM, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_NSH, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_PTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_FCOE, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + 0x0000, 
0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff, + NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff, + NPC_ETYPE_QINQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_PTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_FCOE, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSM, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_ITAG, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_QINQ, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 
0xffff, NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu3_cam_entries[] = { + { + NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, 
NPC_ETYPE_MPLSM, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, 
NPC_ETYPE_CTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu4_cam_entries[] = { + { + NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S, + NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000, + }, + { + NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S, + 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S, + }, + { + NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S, + 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S, + }, + { + NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu5_cam_entries[] = { + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, + NPC_IP_VER_4, 
NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_ARP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_RARP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_PTP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_FCOE, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_GRE << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_IP6 << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, NPC_IPNH_MPLS << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + 0x0000, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, 0x0000, NPC_MPLS_S, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_ETH, 
NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu6_cam_entries[] = { + { + NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu7_cam_entries[] = { + { + NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu8_cam_entries[] = { + { + NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff, + NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff, + NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff, + NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000, + NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff, + NPC_VXLAN_I, NPC_VXLAN_I, 0x0000, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff, + 0x0000, 0xffff, 0x0000, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK, + }, 
+ { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + NPC_VXLANGPE_P, NPC_VXLANGPE_P, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, + 0x0000, NPC_VXLANGPE_P, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPC, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff, + NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU, + NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK, + 0x0000, 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_SCTP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_ICMP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_IGMP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_ICMP6, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_ESP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_AH, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff, + NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, + 0x0000, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, + NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, + NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + 
NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, + NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, + 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, + 0x0000, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, + NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, + NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, + NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, + 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, + 0x0000, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, + NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, + NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, + NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, + 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, + NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, + NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, + NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, + 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, + NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, + NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, + NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 
0xff, NPC_ETYPE_IP6, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, + 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, + NPC_GRE_F_ROUTE, 0x4fff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, + 0x0000, 0x4fff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, + 0x0000, 0x0003, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, + NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1, + 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, + NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1, + 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1, + 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, + 0x2001, 0xef7f, 0x0000, 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, + 0x0001, 0x0003, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu9_cam_entries[] = { + { + NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, NPC_MPLS_S, NPC_MPLS_S, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S, + NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S, + 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S, + 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S, + NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S, + 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S, + 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu10_cam_entries[] = { + { + NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + 0x0000, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + 
NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu11_cam_entries[] = { + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, + NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, + NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, + NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, + NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, + NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + 
NPC_S_KPU11_TU_PPP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS_IN_NSH, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU11_TU_3RD_NSH, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu12_cam_entries[] = { + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000, + NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_ARP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000, + NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 
0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu13_cam_entries[] = { + { + NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu14_cam_entries[] = { + { + NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu15_cam_entries[] = { + { + NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff, + NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff, + NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff, + NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000, + NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_UDP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_SCTP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_ICMP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_IGMP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_ICMP6, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_ESP, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU15_TU_AH, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_NA, 0X00, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_cam kpu16_cam_entries[] = { + { + NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU16_HTTP_DATA, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU16_HTTPS_DATA, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU16_PPTP_DATA, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, + { + NPC_S_KPU16_UDP_DATA, 0xff, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, + }, +}; + +static struct npc_kpu_profile_action kpu1_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU5_IP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU5_IP6, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 3, 0, NPC_S_KPU5_ARP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 3, 0, NPC_S_KPU5_RARP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 3, 0, NPC_S_KPU5_PTP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 3, 0, NPC_S_KPU5_FCOE, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU2_CTAG, 14, 1, + NPC_LID_LA, 
NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20, + 0, 0, NPC_S_KPU2_SBTAG, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU2_QINQ, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24, + 0, 0, NPC_S_KPU2_ETAG, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ETAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24, + 0, 0, NPC_S_KPU2_ITAG, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 2, 0, NPC_S_KPU4_MPLS, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 2, 0, NPC_S_KPU4_MPLS, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 2, 0, NPC_S_KPU4_NSH, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_NSH, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU5_IP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU5_IP6, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 3, 0, NPC_S_KPU5_ARP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 3, 0, NPC_S_KPU5_RARP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 3, 0, NPC_S_KPU5_PTP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 3, 0, NPC_S_KPU5_FCOE, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU2_CTAG, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20, + 0, 0, NPC_S_KPU2_SBTAG, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU2_QINQ, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24, + 0, 0, NPC_S_KPU2_ETAG, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ETAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24, + 0, 0, NPC_S_KPU2_ITAG, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 2, 0, NPC_S_KPU4_MPLS, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 2, 0, NPC_S_KPU4_MPLS, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 2, 0, NPC_S_KPU4_NSH, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_NSH, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0, + 0, 0, + }, + { + 
NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu2_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_RARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_PTP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_FCOE, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 1, 0, NPC_S_KPU4_NSH, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_ETYPE_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_RARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_PTP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_FCOE, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 1, 0, NPC_S_KPU4_NSH, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU3_CTAG, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU3_STAG, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_STAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 
0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_RARP, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_PTP, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_FCOE, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 1, 0, NPC_S_KPU4_NSH, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU3_STAG, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_STAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU3_CTAG, 22, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_RARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_PTP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_FCOE, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 1, 0, NPC_S_KPU4_NSH, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_ETYPE_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_RARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_PTP, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_FCOE, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, + 0, 0, + 
}, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 1, 0, NPC_S_KPU4_NSH, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU3_CTAG, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU3_QINQ, 8, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_QINQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 4, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 4, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_RARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_PTP, 4, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_FCOE, 4, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 1, 0, NPC_S_KPU4_NSH, 4, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_ETYPE_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_RARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_PTP, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_FCOE, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, 1, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 1, 0, NPC_S_KPU4_NSH, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU3_CTAG, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24, + 0, 0, NPC_S_KPU3_ITAG, 12, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_BTAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU3_STAG, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_STAG, 0, 0, + 0, 0, 
+ }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, + 0, 0, NPC_S_KPU3_QINQ, 8, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_QINQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 26, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 26, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 26, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU3_STAG, 26, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_STAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU3_CTAG, 26, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETYPE_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 18, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 18, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 18, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_RARP, 18, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 26, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 26, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 26, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 22, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 22, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 22, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 22, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 22, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 22, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static 
struct npc_kpu_profile_action kpu3_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU5_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU5_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_ARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_RARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_PTP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_FCOE, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU4_NSH, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU5_IP, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU5_IP6, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_ARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_RARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_PTP, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_FCOE, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU4_NSH, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU5_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU5_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_ARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_RARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU4_NSH, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU5_IP, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU5_IP6, 8, 0, + NPC_LID_LB, 
NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_ARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_RARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_PTP, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_FCOE, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU4_NSH, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU5_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU5_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_ARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_RARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_PTP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_FCOE, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU4_NSH, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU5_IP, 18, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU5_IP6, 18, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_ARP, 18, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU5_RARP, 18, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU5_IP, 26, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU5_IP6, 26, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_ARP, 26, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU5_IP, 22, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU5_IP6, 22, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_ARP, 22, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 
0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU5_IP, 22, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU5_IP6, 22, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU5_ARP, 22, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu4_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU5_MPLS_PL, 4, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU5_MPLS_PL, 8, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_2_LABELS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU5_MPLS_PL, 12, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_3_LABELS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0, + 0, 0, NPC_S_KPU5_MPLS, 12, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_4_LABELS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 7, 0, NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 7, 0, NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 6, 0, NPC_S_KPU11_TU_ETHER, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU5_NSH, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 4, 0, NPC_S_KPU9_TU_MPLS, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu5_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, + 2, 0, NPC_S_KPU8_TCP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10, + 2, 0, NPC_S_KPU8_UDP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_SCTP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_ICMP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_IGMP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU8_ESP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU8_AH, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 2, 0, NPC_S_KPU8_GRE, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP6, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, 2, 6, 10, + 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, + 2, 0, NPC_S_KPU8_TCP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10, + 2, 0, NPC_S_KPU8_UDP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_SCTP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_ICMP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_IGMP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU8_ESP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU8_AH, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 2, 0, NPC_S_KPU8_GRE, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP_VER, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_ARP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_RARP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_PTP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_FCOE, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, + 2, 0, NPC_S_KPU8_TCP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10, + 2, 0, NPC_S_KPU8_UDP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_SCTP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_ICMP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_ICMP6, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_ESP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_AH, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU8_GRE, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP6, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_TUN_IP6, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 3, 0, NPC_S_KPU9_TU_MPLS, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_MPLS_IN_IP, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU6_IP6_EXT, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_HAS_EXT, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LC, NPC_EC_IP6_VER, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 5, 0, NPC_S_KPU11_TU_ETHER, 8, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 6, 0, NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 5, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 3, 0, NPC_S_KPU9_TU_MPLS, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu6_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu7_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu8_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 7, 0, NPC_S_KPU16_HTTP_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 7, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 7, 0, NPC_S_KPU16_PPTP_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 7, 0, NPC_S_KPU16_TCP_DATA, 20, 1, + NPC_LID_LD, 
NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 7, 0, NPC_S_KPU16_HTTP_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 7, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 7, 0, NPC_S_KPU16_PPTP_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 7, 0, NPC_S_KPU16_TCP_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN_NOVNI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_VXLAN, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NSH, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_MPLS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_NSH, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_MPLS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NONP, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, 8, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, 
NPC_F_UDP_GENEVE_OAM_CRI_OPT, + 8, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, + 8, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, + 8, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT, + 8, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, + 8, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT, + 8, 0x3f, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPC, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_G_PDU, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 7, 0, NPC_S_KPU16_UDP_DATA, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_UNK_PORT, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_SCTP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_ICMP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_IGMP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_ICMP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_ESP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_AH, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 2, 0, NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_NVGRE, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_NVGRE, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, 
NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, + 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE_NSH, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU9_TU_NSH, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0, 
+ 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_ROUTE, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_UNK_PROTO, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_GRE, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU11_TU_PPP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU11_TU_PPP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU11_TU_PPP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_ACK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU11_TU_PPP, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ_ACK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_UNK_PROTO, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_GRE_VER1, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu9_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, 
NPC_S_KPU10_TU_MPLS_PL, 8, 0, + NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_2_LABELS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 0, + NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_3_LABELS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0, + 0, 0, NPC_S_KPU10_TU_MPLS, 12, 0, + NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_4_LABELS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_2_LABELS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_3_LABELS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0, + 0, 0, NPC_S_KPU10_TU_MPLS, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_4_LABELS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 2, 0, NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 2, 0, NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 1, 0, NPC_S_KPU11_TU_ETHER, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU10_TU_NSH, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 1, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu10_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU12_TU_IP, 4, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU12_TU_IP6, 4, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 0, 0, NPC_S_KPU11_TU_ETHER, 8, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 1, 0, NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 1, 0, NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, + 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, 
NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, + 0, 2, + }, + { + NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu11_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP, 14, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP6, 14, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU12_TU_ARP, 14, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP, 18, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP6, 18, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU12_TU_ARP, 18, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP, 22, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP6, 22, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU12_TU_ARP, 22, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, + NPC_F_TU_ETHER_STAG_CTAG_UNK, 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP, 18, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP6, 18, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU12_TU_ARP, 18, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP, 22, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP6, 22, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU12_TU_ARP, 22, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, + NPC_F_TU_ETHER_QINQ_CTAG_UNK, 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP, 18, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, + 0, 0, NPC_S_KPU12_TU_IP6, 18, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU12_TU_ARP, 18, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, 
NPC_F_TU_ETHER_QINQ, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_UNK, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_PPP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_NSH, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_3RD_NSH, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LE, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu12_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, + 2, 0, NPC_S_KPU15_TU_TCP, 20, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 2, 0, NPC_S_KPU15_TU_UDP, 20, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_SCTP, 20, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_ICMP, 20, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_IGMP, 20, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_ESP, 20, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_AH, 20, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_UNK_PROTO, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, + 2, 0, NPC_S_KPU15_TU_TCP, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 2, 0, NPC_S_KPU15_TU_UDP, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_SCTP, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_ICMP, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_IGMP, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_ESP, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_AH, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, + 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, + NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_IP_VER, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ARP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, + 2, 0, NPC_S_KPU15_TU_TCP, 40, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, + 0, 0, + }, + { + 
NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 2, 0, NPC_S_KPU15_TU_UDP, 40, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_SCTP, 40, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_ICMP, 40, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_ICMP6, 40, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_ESP, 40, 1, + NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 2, 0, NPC_S_KPU15_TU_AH, 40, 1, + NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, + 0, 0, NPC_S_KPU13_TU_IP6_EXT, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP6, NPC_F_IP6_HAS_EXT, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_IP6_VER, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LF, NPC_EC_UNK, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu13_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu14_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu15_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU16_HTTP_DATA, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU16_PPTP_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU16_TCP_DATA, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU16_HTTP_DATA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU16_PPTP_DATA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU16_TCP_DATA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS, + 12, 0xf0, 1, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 0, NPC_S_KPU16_UDP_DATA, 8, 1, + NPC_LID_LG, NPC_LT_LG_TU_UDP, NPC_F_UDP_UNK_PORT, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_SCTP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_ICMP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IGMP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_ICMP6, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, 
NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_ESP, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_AH, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_LG, NPC_EC_L4, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 0, + NPC_LID_LG, NPC_LT_NA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile_action kpu16_action_entries[] = { + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TCP_DATA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_HTTP_DATA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_HTTPS_DATA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_PPTP_DATA, 0, 0, 0, + 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, + 0, 1, NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_UDP_DATA, 0, 0, 0, + 0, 0, + }, +}; + +static struct npc_kpu_profile npc_kpu_profiles[] = { + { + ARRAY_SIZE(kpu1_cam_entries), + ARRAY_SIZE(kpu1_action_entries), + &kpu1_cam_entries[0], + &kpu1_action_entries[0], + }, + { + ARRAY_SIZE(kpu2_cam_entries), + ARRAY_SIZE(kpu2_action_entries), + &kpu2_cam_entries[0], + &kpu2_action_entries[0], + }, + { + ARRAY_SIZE(kpu3_cam_entries), + ARRAY_SIZE(kpu3_action_entries), + &kpu3_cam_entries[0], + &kpu3_action_entries[0], + }, + { + ARRAY_SIZE(kpu4_cam_entries), + ARRAY_SIZE(kpu4_action_entries), + &kpu4_cam_entries[0], + &kpu4_action_entries[0], + }, + { + ARRAY_SIZE(kpu5_cam_entries), + ARRAY_SIZE(kpu5_action_entries), + &kpu5_cam_entries[0], + &kpu5_action_entries[0], + }, + { + ARRAY_SIZE(kpu6_cam_entries), + ARRAY_SIZE(kpu6_action_entries), + &kpu6_cam_entries[0], + &kpu6_action_entries[0], + }, + { + ARRAY_SIZE(kpu7_cam_entries), + ARRAY_SIZE(kpu7_action_entries), + &kpu7_cam_entries[0], + &kpu7_action_entries[0], + }, + { + ARRAY_SIZE(kpu8_cam_entries), + ARRAY_SIZE(kpu8_action_entries), + &kpu8_cam_entries[0], + &kpu8_action_entries[0], + }, + { + ARRAY_SIZE(kpu9_cam_entries), + ARRAY_SIZE(kpu9_action_entries), + &kpu9_cam_entries[0], + &kpu9_action_entries[0], + }, + { + ARRAY_SIZE(kpu10_cam_entries), + ARRAY_SIZE(kpu10_action_entries), + &kpu10_cam_entries[0], + &kpu10_action_entries[0], + }, + { + ARRAY_SIZE(kpu11_cam_entries), + ARRAY_SIZE(kpu11_action_entries), + &kpu11_cam_entries[0], + &kpu11_action_entries[0], + }, + { + ARRAY_SIZE(kpu12_cam_entries), + ARRAY_SIZE(kpu12_action_entries), + &kpu12_cam_entries[0], + &kpu12_action_entries[0], + }, + { + ARRAY_SIZE(kpu13_cam_entries), + ARRAY_SIZE(kpu13_action_entries), + &kpu13_cam_entries[0], + &kpu13_action_entries[0], + }, + { + ARRAY_SIZE(kpu14_cam_entries), + ARRAY_SIZE(kpu14_action_entries), + &kpu14_cam_entries[0], + &kpu14_action_entries[0], + }, + { + ARRAY_SIZE(kpu15_cam_entries), + ARRAY_SIZE(kpu15_action_entries), + &kpu15_cam_entries[0], + &kpu15_action_entries[0], + }, + { + ARRAY_SIZE(kpu16_cam_entries), + ARRAY_SIZE(kpu16_action_entries), + &kpu16_cam_entries[0], + &kpu16_action_entries[0], + }, +}; + +#endif /* NPC_PROFILE_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c new file mode 100644 index 000000000000..dc28fa2b9481 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -0,0 +1,1772 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell 
International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/sysfs.h> + +#include "cgx.h" +#include "rvu.h" +#include "rvu_reg.h" + +#define DRV_NAME "octeontx2-af" +#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" +#define DRV_VERSION "1.0" + +static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); + +static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf); +static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf); + +/* Supported devices */ +static const struct pci_device_id rvu_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, rvu_id_table); + +/* Poll a RVU block's register 'offset', for a 'zero' + * or 'nonzero' at bits specified by 'mask' + */ +int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) +{ + unsigned long timeout = jiffies + usecs_to_jiffies(100); + void __iomem *reg; + u64 reg_val; + + reg = rvu->afreg_base + ((block << 28) | offset); + while (time_before(jiffies, timeout)) { + reg_val = readq(reg); + if (zero && !(reg_val & mask)) + return 0; + if (!zero && (reg_val & mask)) + return 0; + usleep_range(1, 5); + timeout--; + } + return -EBUSY; +} + +int rvu_alloc_rsrc(struct rsrc_bmap *rsrc) +{ + int id; + + if (!rsrc->bmap) + return -EINVAL; + + id = find_first_zero_bit(rsrc->bmap, rsrc->max); + if (id >= rsrc->max) + return -ENOSPC; + + __set_bit(id, rsrc->bmap); + + return id; +} + +int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) +{ + int start; + + if (!rsrc->bmap) + return -EINVAL; + + start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); + if (start >= rsrc->max) + return -ENOSPC; + + bitmap_set(rsrc->bmap, start, nrsrc); + return start; +} + +static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) +{ + if (!rsrc->bmap) + return; + if (start >= rsrc->max) + return; + + bitmap_clear(rsrc->bmap, start, nrsrc); +} + +bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc) +{ + int start; + + if (!rsrc->bmap) + return false; + + start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); + if (start >= rsrc->max) + return false; + + return true; +} + +void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id) +{ + if (!rsrc->bmap) + return; + + __clear_bit(id, rsrc->bmap); +} + +int rvu_rsrc_free_count(struct rsrc_bmap *rsrc) +{ + int used; + + if (!rsrc->bmap) + return 0; + + used = bitmap_weight(rsrc->bmap, rsrc->max); + return (rsrc->max - used); +} + +int rvu_alloc_bitmap(struct rsrc_bmap *rsrc) +{ + rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max), + sizeof(long), GFP_KERNEL); + if (!rsrc->bmap) + return -ENOMEM; + return 0; +} + +/* Get block LF's HW index from a PF_FUNC's block slot number */ +int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) +{ + u16 match = 0; + int lf; + + spin_lock(&rvu->rsrc_lock); + for (lf = 0; lf < block->lf.max; lf++) { + if (block->fn_map[lf] == pcifunc) { + if (slot == match) { + 
spin_unlock(&rvu->rsrc_lock); + return lf; + } + match++; + } + } + spin_unlock(&rvu->rsrc_lock); + return -ENODEV; +} + +/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E. + * Some silicon variants of OcteonTX2 supports + * multiple blocks of same type. + * + * @pcifunc has to be zero when no LF is yet attached. + */ +int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) +{ + int devnum, blkaddr = -ENODEV; + u64 cfg, reg; + bool is_pf; + + switch (blktype) { + case BLKTYPE_NPC: + blkaddr = BLKADDR_NPC; + goto exit; + case BLKTYPE_NPA: + blkaddr = BLKADDR_NPA; + goto exit; + case BLKTYPE_NIX: + /* For now assume NIX0 */ + if (!pcifunc) { + blkaddr = BLKADDR_NIX0; + goto exit; + } + break; + case BLKTYPE_SSO: + blkaddr = BLKADDR_SSO; + goto exit; + case BLKTYPE_SSOW: + blkaddr = BLKADDR_SSOW; + goto exit; + case BLKTYPE_TIM: + blkaddr = BLKADDR_TIM; + goto exit; + case BLKTYPE_CPT: + /* For now assume CPT0 */ + if (!pcifunc) { + blkaddr = BLKADDR_CPT0; + goto exit; + } + break; + } + + /* Check if this is a RVU PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) { + is_pf = false; + devnum = rvu_get_hwvf(rvu, pcifunc); + } else { + is_pf = true; + devnum = rvu_get_pf(pcifunc); + } + + /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */ + if (blktype == BLKTYPE_NIX) { + reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG; + cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); + if (cfg) + blkaddr = BLKADDR_NIX0; + } + + /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */ + if (blktype == BLKTYPE_CPT) { + reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG; + cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); + if (cfg) + blkaddr = BLKADDR_CPT0; + } + +exit: + if (is_block_implemented(rvu->hw, blkaddr)) + return blkaddr; + return -ENODEV; +} + +static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, u16 pcifunc, + u16 lf, bool attach) +{ + int devnum, num_lfs = 0; + bool is_pf; + u64 reg; + + if (lf >= block->lf.max) { + dev_err(&rvu->pdev->dev, + "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n", + __func__, lf, block->name, block->lf.max); + return; + } + + /* Check if this is for a RVU PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) { + is_pf = false; + devnum = rvu_get_hwvf(rvu, pcifunc); + } else { + is_pf = true; + devnum = rvu_get_pf(pcifunc); + } + + block->fn_map[lf] = attach ? pcifunc : 0; + + switch (block->type) { + case BLKTYPE_NPA: + pfvf->npalf = attach ? true : false; + num_lfs = pfvf->npalf; + break; + case BLKTYPE_NIX: + pfvf->nixlf = attach ? true : false; + num_lfs = pfvf->nixlf; + break; + case BLKTYPE_SSO: + attach ? pfvf->sso++ : pfvf->sso--; + num_lfs = pfvf->sso; + break; + case BLKTYPE_SSOW: + attach ? pfvf->ssow++ : pfvf->ssow--; + num_lfs = pfvf->ssow; + break; + case BLKTYPE_TIM: + attach ? pfvf->timlfs++ : pfvf->timlfs--; + num_lfs = pfvf->timlfs; + break; + case BLKTYPE_CPT: + attach ? pfvf->cptlfs++ : pfvf->cptlfs--; + num_lfs = pfvf->cptlfs; + break; + } + + reg = is_pf ? 
block->pf_lfcnt_reg : block->vf_lfcnt_reg; + rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); +} + +inline int rvu_get_pf(u16 pcifunc) +{ + return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; +} + +void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) +{ + u64 cfg; + + /* Get numVFs attached to this PF and first HWVF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + *numvfs = (cfg >> 12) & 0xFF; + *hwvf = cfg & 0xFFF; +} + +static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) +{ + int pf, func; + u64 cfg; + + pf = rvu_get_pf(pcifunc); + func = pcifunc & RVU_PFVF_FUNC_MASK; + + /* Get first HWVF attached to this PF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + + return ((cfg & 0xFFF) + func - 1); +} + +struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) +{ + /* Check if it is a PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; + else + return &rvu->pf[rvu_get_pf(pcifunc)]; +} + +bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr) +{ + struct rvu_block *block; + + if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT) + return false; + + block = &hw->block[blkaddr]; + return block->implemented; +} + +static void rvu_check_block_implemented(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid; + u64 cfg; + + /* For each block check if 'implemented' bit is set */ + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); + if (cfg & BIT_ULL(11)) + block->implemented = true; + } +} + +int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf) +{ + int err; + + if (!block->implemented) + return 0; + + rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12)); + err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12), + true); + return err; +} + +static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) +{ + struct rvu_block *block = &rvu->hw->block[blkaddr]; + + if (!block->implemented) + return; + + rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); + rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); +} + +static void rvu_reset_all_blocks(struct rvu *rvu) +{ + /* Do a HW reset of all RVU blocks */ + rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST); +} + +static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) +{ + struct rvu_pfvf *pfvf; + u64 cfg; + int lf; + + for (lf = 0; lf < block->lf.max; lf++) { + cfg = rvu_read64(rvu, block->addr, + block->lfcfg_reg | (lf << block->lfshift)); + if (!(cfg & BIT_ULL(63))) + continue; + + /* Set this resource as being used */ + __set_bit(lf, block->lf.bmap); + + /* Get, to whom this LF is attached */ + pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF); + rvu_update_rsrc_map(rvu, pfvf, block, + (cfg >> 8) & 0xFFFF, lf, true); + + /* Set start MSIX vector for this LF within this PF/VF */ + rvu_set_msix_offset(rvu, pfvf, block, lf); + } +} + +static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf) +{ + int 
min_vecs; + + if (!vf) + goto check_pf; + + if (!nvecs) { + dev_warn(rvu->dev, + "PF%d:VF%d is configured with zero msix vectors, %d\n", + pf, vf - 1, nvecs); + } + return; + +check_pf: + if (pf == 0) + min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT; + else + min_vecs = RVU_PF_INT_VEC_CNT; + + if (!(nvecs < min_vecs)) + return; + dev_warn(rvu->dev, + "PF%d is configured with too few vectors, %d, min is %d\n", + pf, nvecs, min_vecs); +} + +static int rvu_setup_msix_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf, vf, numvfs, hwvf, err; + int nvecs, offset, max_msix; + struct rvu_pfvf *pfvf; + u64 cfg, phy_addr; + dma_addr_t iova; + + for (pf = 0; pf < hw->total_pfs; pf++) { + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + /* If PF is not enabled, nothing to do */ + if (!((cfg >> 20) & 0x01)) + continue; + + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + + pfvf = &rvu->pf[pf]; + /* Get num of MSIX vectors attached to this PF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf)); + pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1; + rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0); + + /* Alloc msix bitmap for this PF */ + err = rvu_alloc_bitmap(&pfvf->msix); + if (err) + return err; + + /* Allocate memory for MSIX vector to RVU block LF mapping */ + pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, + sizeof(u16), GFP_KERNEL); + if (!pfvf->msix_lfmap) + return -ENOMEM; + + /* For PF0 (AF) firmware will set msix vector offsets for + * AF, block AF and PF0_INT vectors, so jump to VFs. + */ + if (!pf) + goto setup_vfmsix; + + /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors. + * These are allocated on driver init and never freed, + * so no need to set 'msix_lfmap' for these. + */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf)); + nvecs = (cfg >> 12) & 0xFF; + cfg &= ~0x7FFULL; + offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); + rvu_write64(rvu, BLKADDR_RVUM, + RVU_PRIV_PFX_INT_CFG(pf), cfg | offset); +setup_vfmsix: + /* Alloc msix bitmap for VFs */ + for (vf = 0; vf < numvfs; vf++) { + pfvf = &rvu->hwvf[hwvf + vf]; + /* Get num of MSIX vectors attached to this VF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, + RVU_PRIV_PFX_MSIX_CFG(pf)); + pfvf->msix.max = (cfg & 0xFFF) + 1; + rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1); + + /* Alloc msix bitmap for this VF */ + err = rvu_alloc_bitmap(&pfvf->msix); + if (err) + return err; + + pfvf->msix_lfmap = + devm_kcalloc(rvu->dev, pfvf->msix.max, + sizeof(u16), GFP_KERNEL); + if (!pfvf->msix_lfmap) + return -ENOMEM; + + /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors. + * These are allocated on driver init and never freed, + * so no need to set 'msix_lfmap' for these. + */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, + RVU_PRIV_HWVFX_INT_CFG(hwvf + vf)); + nvecs = (cfg >> 12) & 0xFF; + cfg &= ~0x7FFULL; + offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); + rvu_write64(rvu, BLKADDR_RVUM, + RVU_PRIV_HWVFX_INT_CFG(hwvf + vf), + cfg | offset); + } + } + + /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence + * create a IOMMU mapping for the physcial address configured by + * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA. 
+ */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + max_msix = cfg & 0xFFFFF; + phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE); + iova = dma_map_resource(rvu->dev, phy_addr, + max_msix * PCI_MSIX_ENTRY_SIZE, + DMA_BIDIRECTIONAL, 0); + + if (dma_mapping_error(rvu->dev, iova)) + return -ENOMEM; + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova); + rvu->msix_base_iova = iova; + + return 0; +} + +static void rvu_free_hw_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + int id, max_msix; + u64 cfg; + + rvu_npa_freemem(rvu); + rvu_npc_freemem(rvu); + rvu_nix_freemem(rvu); + + /* Free block LF bitmaps */ + for (id = 0; id < BLK_COUNT; id++) { + block = &hw->block[id]; + kfree(block->lf.bmap); + } + + /* Free MSIX bitmaps */ + for (id = 0; id < hw->total_pfs; id++) { + pfvf = &rvu->pf[id]; + kfree(pfvf->msix.bmap); + } + + for (id = 0; id < hw->total_vfs; id++) { + pfvf = &rvu->hwvf[id]; + kfree(pfvf->msix.bmap); + } + + /* Unmap MSIX vector base IOVA mapping */ + if (!rvu->msix_base_iova) + return; + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + max_msix = cfg & 0xFFFFF; + dma_unmap_resource(rvu->dev, rvu->msix_base_iova, + max_msix * PCI_MSIX_ENTRY_SIZE, + DMA_BIDIRECTIONAL, 0); +} + +static int rvu_setup_hw_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid, err; + u64 cfg; + + /* Get HW supported max RVU PF & VF count */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + hw->total_pfs = (cfg >> 32) & 0xFF; + hw->total_vfs = (cfg >> 20) & 0xFFF; + hw->max_vfs_per_pf = (cfg >> 40) & 0xFF; + + /* Init NPA LF's bitmap */ + block = &hw->block[BLKADDR_NPA]; + if (!block->implemented) + goto nix; + cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST); + block->lf.max = (cfg >> 16) & 0xFFF; + block->addr = BLKADDR_NPA; + block->type = BLKTYPE_NPA; + block->lfshift = 8; + block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG; + block->lfcfg_reg = NPA_PRIV_LFX_CFG; + block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG; + block->lfreset_reg = NPA_AF_LF_RST; + sprintf(block->name, "NPA"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +nix: + /* Init NIX LF's bitmap */ + block = &hw->block[BLKADDR_NIX0]; + if (!block->implemented) + goto sso; + cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2); + block->lf.max = cfg & 0xFFF; + block->addr = BLKADDR_NIX0; + block->type = BLKTYPE_NIX; + block->lfshift = 8; + block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG; + block->lfcfg_reg = NIX_PRIV_LFX_CFG; + block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; + block->lfreset_reg = NIX_AF_LF_RST; + sprintf(block->name, "NIX"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +sso: + /* Init SSO group's bitmap */ + block = &hw->block[BLKADDR_SSO]; + if (!block->implemented) + goto ssow; + cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST); + block->lf.max = cfg & 0xFFFF; + block->addr = BLKADDR_SSO; + block->type = BLKTYPE_SSO; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG; + block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG; + block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG; + block->lfreset_reg = SSO_AF_LF_HWGRP_RST; + 
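/* Illustration of the per-LF CSR addressing these fields feed into
 * elsewhere in this file (rvu_scan_block(), rvu_attach_block(),
 * rvu_set_msix_offset()): a block's per-LF registers sit at a fixed
 * stride, so LF 'lf' of this SSO block (lfshift = 3) is reached as
 *   block->lfcfg_reg | (lf << block->lfshift)
 * e.g. LF 5 -> SSO_PRIV_LFX_HWGRP_CFG | 0x28.
 */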
sprintf(block->name, "SSO GROUP"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +ssow: + /* Init SSO workslot's bitmap */ + block = &hw->block[BLKADDR_SSOW]; + if (!block->implemented) + goto tim; + block->lf.max = (cfg >> 56) & 0xFF; + block->addr = BLKADDR_SSOW; + block->type = BLKTYPE_SSOW; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG; + block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG; + block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG; + block->lfreset_reg = SSOW_AF_LF_HWS_RST; + sprintf(block->name, "SSOWS"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +tim: + /* Init TIM LF's bitmap */ + block = &hw->block[BLKADDR_TIM]; + if (!block->implemented) + goto cpt; + cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST); + block->lf.max = cfg & 0xFFFF; + block->addr = BLKADDR_TIM; + block->type = BLKTYPE_TIM; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG; + block->lfcfg_reg = TIM_PRIV_LFX_CFG; + block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG; + block->lfreset_reg = TIM_AF_LF_RST; + sprintf(block->name, "TIM"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +cpt: + /* Init CPT LF's bitmap */ + block = &hw->block[BLKADDR_CPT0]; + if (!block->implemented) + goto init; + cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0); + block->lf.max = cfg & 0xFF; + block->addr = BLKADDR_CPT0; + block->type = BLKTYPE_CPT; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG; + block->lfcfg_reg = CPT_PRIV_LFX_CFG; + block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; + block->lfreset_reg = CPT_AF_LF_RST; + sprintf(block->name, "CPT"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +init: + /* Allocate memory for PFVF data */ + rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, + sizeof(struct rvu_pfvf), GFP_KERNEL); + if (!rvu->pf) + return -ENOMEM; + + rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, + sizeof(struct rvu_pfvf), GFP_KERNEL); + if (!rvu->hwvf) + return -ENOMEM; + + spin_lock_init(&rvu->rsrc_lock); + + err = rvu_setup_msix_resources(rvu); + if (err) + return err; + + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + if (!block->lf.bmap) + continue; + + /* Allocate memory for block LF/slot to pcifunc mapping info */ + block->fn_map = devm_kcalloc(rvu->dev, block->lf.max, + sizeof(u16), GFP_KERNEL); + if (!block->fn_map) + return -ENOMEM; + + /* Scan all blocks to check if low level firmware has + * already provisioned any of the resources to a PF/VF. 
+ */ + rvu_scan_block(rvu, block); + } + + err = rvu_npc_init(rvu); + if (err) + return err; + + err = rvu_npa_init(rvu); + if (err) + return err; + + err = rvu_nix_init(rvu); + if (err) + return err; + + return 0; +} + +/* NPA and NIX admin queue APIs */ +void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq) +{ + if (!aq) + return; + + qmem_free(rvu->dev, aq->inst); + qmem_free(rvu->dev, aq->res); + devm_kfree(rvu->dev, aq); +} + +int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, + int qsize, int inst_size, int res_size) +{ + struct admin_queue *aq; + int err; + + *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL); + if (!*ad_queue) + return -ENOMEM; + aq = *ad_queue; + + /* Alloc memory for instructions i.e AQ */ + err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size); + if (err) { + devm_kfree(rvu->dev, aq); + return err; + } + + /* Alloc memory for results */ + err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size); + if (err) { + rvu_aq_free(rvu, aq); + return err; + } + + spin_lock_init(&aq->lock); + return 0; +} + +static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req, + struct ready_msg_rsp *rsp) +{ + return 0; +} + +/* Get current count of a RVU block's LF/slots + * provisioned to a given RVU func. + */ +static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) +{ + switch (blktype) { + case BLKTYPE_NPA: + return pfvf->npalf ? 1 : 0; + case BLKTYPE_NIX: + return pfvf->nixlf ? 1 : 0; + case BLKTYPE_SSO: + return pfvf->sso; + case BLKTYPE_SSOW: + return pfvf->ssow; + case BLKTYPE_TIM: + return pfvf->timlfs; + case BLKTYPE_CPT: + return pfvf->cptlfs; + } + return 0; +} + +static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, + int pcifunc, int slot) +{ + u64 val; + + val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13); + rvu_write64(rvu, block->addr, block->lookup_reg, val); + /* Wait for the lookup to finish */ + /* TODO: put some timeout here */ + while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) + ; + + val = rvu_read64(rvu, block->addr, block->lookup_reg); + + /* Check LF valid bit */ + if (!(val & (1ULL << 12))) + return -1; + + return (val & 0xFFF); +} + +static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int slot, lf, num_lfs; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + + num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type); + if (!num_lfs) + return; + + for (slot = 0; slot < num_lfs; slot++) { + lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); + if (lf < 0) /* This should never happen */ + continue; + + /* Disable the LF */ + rvu_write64(rvu, blkaddr, block->lfcfg_reg | + (lf << block->lfshift), 0x00ULL); + + /* Update SW maintained mapping info as well */ + rvu_update_rsrc_map(rvu, pfvf, block, + pcifunc, lf, false); + + /* Free the resource */ + rvu_free_rsrc(&block->lf, lf); + + /* Clear MSIX vector offset for this LF */ + rvu_clear_msix_offset(rvu, pfvf, block, lf); + } +} + +static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, + u16 pcifunc) +{ + struct rvu_hwinfo *hw = rvu->hw; + bool detach_all = true; + struct rvu_block *block; + int blkid; + + spin_lock(&rvu->rsrc_lock); + + /* Check for partial resource detach */ + if (detach && detach->partial) + detach_all = false; + + /* Check for RVU block's LFs attached to this func, + * 
if so, detach them. + */ + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + if (!block->lf.bmap) + continue; + if (!detach_all && detach) { + if (blkid == BLKADDR_NPA && !detach->npalf) + continue; + else if ((blkid == BLKADDR_NIX0) && !detach->nixlf) + continue; + else if ((blkid == BLKADDR_SSO) && !detach->sso) + continue; + else if ((blkid == BLKADDR_SSOW) && !detach->ssow) + continue; + else if ((blkid == BLKADDR_TIM) && !detach->timlfs) + continue; + else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs) + continue; + } + rvu_detach_block(rvu, pcifunc, block->type); + } + + spin_unlock(&rvu->rsrc_lock); + return 0; +} + +static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu, + struct rsrc_detach *detach, + struct msg_rsp *rsp) +{ + return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); +} + +static void rvu_attach_block(struct rvu *rvu, int pcifunc, + int blktype, int num_lfs) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int slot, lf; + int blkaddr; + u64 cfg; + + if (!num_lfs) + return; + + blkaddr = rvu_get_blkaddr(rvu, blktype, 0); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + if (!block->lf.bmap) + return; + + for (slot = 0; slot < num_lfs; slot++) { + /* Allocate the resource */ + lf = rvu_alloc_rsrc(&block->lf); + if (lf < 0) + return; + + cfg = (1ULL << 63) | (pcifunc << 8) | slot; + rvu_write64(rvu, blkaddr, block->lfcfg_reg | + (lf << block->lfshift), cfg); + rvu_update_rsrc_map(rvu, pfvf, block, + pcifunc, lf, true); + + /* Set start MSIX vector for this LF within this PF/VF */ + rvu_set_msix_offset(rvu, pfvf, block, lf); + } +} + +static int rvu_check_rsrc_availability(struct rvu *rvu, + struct rsrc_attach *req, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int free_lfs, mappedlfs; + + /* Only one NPA LF can be attached */ + if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) { + block = &hw->block[BLKADDR_NPA]; + free_lfs = rvu_rsrc_free_count(&block->lf); + if (!free_lfs) + goto fail; + } else if (req->npalf) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid req, already has NPA\n", + pcifunc); + return -EINVAL; + } + + /* Only one NIX LF can be attached */ + if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) { + block = &hw->block[BLKADDR_NIX0]; + free_lfs = rvu_rsrc_free_count(&block->lf); + if (!free_lfs) + goto fail; + } else if (req->nixlf) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid req, already has NIX\n", + pcifunc); + return -EINVAL; + } + + if (req->sso) { + block = &hw->block[BLKADDR_SSO]; + /* Is request within limits ? 
*/ + if (req->sso > block->lf.max) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid SSO req, %d > max %d\n", + pcifunc, req->sso, block->lf.max); + return -EINVAL; + } + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + free_lfs = rvu_rsrc_free_count(&block->lf); + /* Check if additional resources are available */ + if (req->sso > mappedlfs && + ((req->sso - mappedlfs) > free_lfs)) + goto fail; + } + + if (req->ssow) { + block = &hw->block[BLKADDR_SSOW]; + if (req->ssow > block->lf.max) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid SSOW req, %d > max %d\n", + pcifunc, req->sso, block->lf.max); + return -EINVAL; + } + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + free_lfs = rvu_rsrc_free_count(&block->lf); + if (req->ssow > mappedlfs && + ((req->ssow - mappedlfs) > free_lfs)) + goto fail; + } + + if (req->timlfs) { + block = &hw->block[BLKADDR_TIM]; + if (req->timlfs > block->lf.max) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid TIMLF req, %d > max %d\n", + pcifunc, req->timlfs, block->lf.max); + return -EINVAL; + } + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + free_lfs = rvu_rsrc_free_count(&block->lf); + if (req->timlfs > mappedlfs && + ((req->timlfs - mappedlfs) > free_lfs)) + goto fail; + } + + if (req->cptlfs) { + block = &hw->block[BLKADDR_CPT0]; + if (req->cptlfs > block->lf.max) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid CPTLF req, %d > max %d\n", + pcifunc, req->cptlfs, block->lf.max); + return -EINVAL; + } + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + free_lfs = rvu_rsrc_free_count(&block->lf); + if (req->cptlfs > mappedlfs && + ((req->cptlfs - mappedlfs) > free_lfs)) + goto fail; + } + + return 0; + +fail: + dev_info(rvu->dev, "Request for %s failed\n", block->name); + return -ENOSPC; +} + +static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu, + struct rsrc_attach *attach, + struct msg_rsp *rsp) +{ + u16 pcifunc = attach->hdr.pcifunc; + int err; + + /* If first request, detach all existing attached resources */ + if (!attach->modify) + rvu_detach_rsrcs(rvu, NULL, pcifunc); + + spin_lock(&rvu->rsrc_lock); + + /* Check if the request can be accommodated */ + err = rvu_check_rsrc_availability(rvu, attach, pcifunc); + if (err) + goto exit; + + /* Now attach the requested resources */ + if (attach->npalf) + rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1); + + if (attach->nixlf) + rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1); + + if (attach->sso) { + /* RVU func doesn't know which exact LF or slot is attached + * to it, it always sees as slot 0,1,2. So for a 'modify' + * request, simply detach all existing attached LFs/slots + * and attach a fresh. 
+ */ + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso); + } + + if (attach->ssow) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow); + } + + if (attach->timlfs) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); + rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs); + } + + if (attach->cptlfs) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); + rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs); + } + +exit: + spin_unlock(&rvu->rsrc_lock); + return err; +} + +static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int lf) +{ + u16 vec; + + if (lf < 0) + return MSIX_VECTOR_INVALID; + + for (vec = 0; vec < pfvf->msix.max; vec++) { + if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf)) + return vec; + } + return MSIX_VECTOR_INVALID; +} + +static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf) +{ + u16 nvecs, vec, offset; + u64 cfg; + + cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift)); + nvecs = (cfg >> 12) & 0xFF; + + /* Check and alloc MSIX vectors, must be contiguous */ + if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs)) + return; + + offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); + + /* Config MSIX offset in LF */ + rvu_write64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift), (cfg & ~0x7FFULL) | offset); + + /* Update the bitmap as well */ + for (vec = 0; vec < nvecs; vec++) + pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf); +} + +static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf) +{ + u16 nvecs, vec, offset; + u64 cfg; + + cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift)); + nvecs = (cfg >> 12) & 0xFF; + + /* Clear MSIX offset in LF */ + rvu_write64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift), cfg & ~0x7FFULL); + + offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf); + + /* Update the mapping */ + for (vec = 0; vec < nvecs; vec++) + pfvf->msix_lfmap[offset + vec] = 0; + + /* Free the same in MSIX bitmap */ + rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); +} + +static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req, + struct msix_offset_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + int lf, slot; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->msix.bmap) + return 0; + + /* Set MSIX offsets for each block's LFs attached to this PF/VF */ + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); + rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); + + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0); + rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf); + + rsp->sso = pfvf->sso; + for (slot = 0; slot < rsp->sso; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot); + rsp->sso_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf); + } + + rsp->ssow = pfvf->ssow; + for (slot = 0; slot < rsp->ssow; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot); + rsp->ssow_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf); + } + + rsp->timlfs = pfvf->timlfs; + for (slot = 0; slot < rsp->timlfs; slot++) { + lf = rvu_get_lf(rvu, 
&hw->block[BLKADDR_TIM], pcifunc, slot); + rsp->timlf_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf); + } + + rsp->cptlfs = pfvf->cptlfs; + for (slot = 0; slot < rsp->cptlfs; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot); + rsp->cptlf_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf); + } + return 0; +} + +static int rvu_process_mbox_msg(struct rvu *rvu, int devid, + struct mbox_msghdr *req) +{ + /* Check if valid, if not reply with a invalid msg */ + if (req->sig != OTX2_MBOX_REQ_SIG) + goto bad_message; + + switch (req->id) { +#define M(_name, _id, _req_type, _rsp_type) \ + case _id: { \ + struct _rsp_type *rsp; \ + int err; \ + \ + rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ + &rvu->mbox, devid, \ + sizeof(struct _rsp_type)); \ + if (rsp) { \ + rsp->hdr.id = _id; \ + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ + rsp->hdr.pcifunc = req->pcifunc; \ + rsp->hdr.rc = 0; \ + } \ + \ + err = rvu_mbox_handler_ ## _name(rvu, \ + (struct _req_type *)req, \ + rsp); \ + if (rsp && err) \ + rsp->hdr.rc = err; \ + \ + return rsp ? err : -ENOMEM; \ + } +MBOX_MESSAGES +#undef M + break; +bad_message: + default: + otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc, + req->id); + return -ENODEV; + } +} + +static void rvu_mbox_handler(struct work_struct *work) +{ + struct rvu_work *mwork = container_of(work, struct rvu_work, work); + struct rvu *rvu = mwork->rvu; + struct otx2_mbox_dev *mdev; + struct mbox_hdr *req_hdr; + struct mbox_msghdr *msg; + struct otx2_mbox *mbox; + int offset, id, err; + u16 pf; + + mbox = &rvu->mbox; + pf = mwork - rvu->mbox_wrk; + mdev = &mbox->dev[pf]; + + /* Process received mbox messages */ + req_hdr = mdev->mbase + mbox->rx_start; + if (req_hdr->num_msgs == 0) + return; + + offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < req_hdr->num_msgs; id++) { + msg = mdev->mbase + offset; + + /* Set which PF sent this message based on mbox IRQ */ + msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); + msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT); + err = rvu_process_mbox_msg(rvu, pf, msg); + if (!err) { + offset = mbox->rx_start + msg->next_msgoff; + continue; + } + + if (msg->pcifunc & RVU_PFVF_FUNC_MASK) + dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", + err, otx2_mbox_id2name(msg->id), msg->id, pf, + (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); + else + dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", + err, otx2_mbox_id2name(msg->id), msg->id, pf); + } + + /* Send mbox responses to PF */ + otx2_mbox_msg_send(mbox, pf); +} + +static void rvu_mbox_up_handler(struct work_struct *work) +{ + struct rvu_work *mwork = container_of(work, struct rvu_work, work); + struct rvu *rvu = mwork->rvu; + struct otx2_mbox_dev *mdev; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + struct otx2_mbox *mbox; + int offset, id; + u16 pf; + + mbox = &rvu->mbox_up; + pf = mwork - rvu->mbox_wrk_up; + mdev = &mbox->dev[pf]; + + rsp_hdr = mdev->mbase + mbox->rx_start; + if (rsp_hdr->num_msgs == 0) { + dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n"); + return; + } + + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < rsp_hdr->num_msgs; id++) { + msg = mdev->mbase + offset; + + if (msg->id >= MBOX_MSG_MAX) { + dev_err(rvu->dev, + "Mbox msg with unknown ID 0x%x\n", msg->id); + goto end; + } + + if (msg->sig != OTX2_MBOX_RSP_SIG) { + dev_err(rvu->dev, + "Mbox msg with wrong signature %x, ID 
0x%x\n", + msg->sig, msg->id); + goto end; + } + + switch (msg->id) { + case MBOX_MSG_CGX_LINK_EVENT: + break; + default: + if (msg->rc) + dev_err(rvu->dev, + "Mbox msg response has err %d, ID 0x%x\n", + msg->rc, msg->id); + break; + } +end: + offset = mbox->rx_start + msg->next_msgoff; + mdev->msgs_acked++; + } + + otx2_mbox_reset(mbox, 0); +} + +static int rvu_mbox_init(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + void __iomem *hwbase = NULL; + struct rvu_work *mwork; + u64 bar4_addr; + int err, pf; + + rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox", + WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, + hw->total_pfs); + if (!rvu->mbox_wq) + return -ENOMEM; + + rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs, + sizeof(struct rvu_work), GFP_KERNEL); + if (!rvu->mbox_wrk) { + err = -ENOMEM; + goto exit; + } + + rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs, + sizeof(struct rvu_work), GFP_KERNEL); + if (!rvu->mbox_wrk_up) { + err = -ENOMEM; + goto exit; + } + + /* Map mbox region shared with PFs */ + bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); + /* Mailbox is a reserved memory (in RAM) region shared between + * RVU devices, shouldn't be mapped as device memory to allow + * unaligned accesses. + */ + hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs); + if (!hwbase) { + dev_err(rvu->dev, "Unable to map mailbox region\n"); + err = -ENOMEM; + goto exit; + } + + err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base, + MBOX_DIR_AFPF, hw->total_pfs); + if (err) + goto exit; + + err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base, + MBOX_DIR_AFPF_UP, hw->total_pfs); + if (err) + goto exit; + + for (pf = 0; pf < hw->total_pfs; pf++) { + mwork = &rvu->mbox_wrk[pf]; + mwork->rvu = rvu; + INIT_WORK(&mwork->work, rvu_mbox_handler); + } + + for (pf = 0; pf < hw->total_pfs; pf++) { + mwork = &rvu->mbox_wrk_up[pf]; + mwork->rvu = rvu; + INIT_WORK(&mwork->work, rvu_mbox_up_handler); + } + + return 0; +exit: + if (hwbase) + iounmap((void __iomem *)hwbase); + destroy_workqueue(rvu->mbox_wq); + return err; +} + +static void rvu_mbox_destroy(struct rvu *rvu) +{ + if (rvu->mbox_wq) { + flush_workqueue(rvu->mbox_wq); + destroy_workqueue(rvu->mbox_wq); + rvu->mbox_wq = NULL; + } + + if (rvu->mbox.hwbase) + iounmap((void __iomem *)rvu->mbox.hwbase); + + otx2_mbox_destroy(&rvu->mbox); + otx2_mbox_destroy(&rvu->mbox_up); +} + +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 intr; + u8 pf; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); + /* Clear interrupts */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); + + /* Sync with mbox memory region */ + smp_wmb(); + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (intr & (1ULL << pf)) { + mbox = &rvu->mbox; + mdev = &mbox->dev[pf]; + hdr = mdev->mbase + mbox->rx_start; + if (hdr->num_msgs) + queue_work(rvu->mbox_wq, + &rvu->mbox_wrk[pf].work); + mbox = &rvu->mbox_up; + mdev = &mbox->dev[pf]; + hdr = mdev->mbase + mbox->rx_start; + if (hdr->num_msgs) + queue_work(rvu->mbox_wq, + &rvu->mbox_wrk_up[pf].work); + } + } + + return IRQ_HANDLED; +} + +static void rvu_enable_mbox_intr(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + /* Clear spurious irqs, if any */ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); + + /* Enable mailbox interrupt for all PFs except PF0 i.e AF 
itself */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S, + INTR_MASK(hw->total_pfs) & ~1ULL); +} + +static void rvu_unregister_interrupts(struct rvu *rvu) +{ + int irq; + + /* Disable the Mbox interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + for (irq = 0; irq < rvu->num_vec; irq++) { + if (rvu->irq_allocated[irq]) + free_irq(pci_irq_vector(rvu->pdev, irq), rvu); + } + + pci_free_irq_vectors(rvu->pdev); + rvu->num_vec = 0; +} + +static int rvu_register_interrupts(struct rvu *rvu) +{ + int ret; + + rvu->num_vec = pci_msix_vec_count(rvu->pdev); + + rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec, + NAME_SIZE, GFP_KERNEL); + if (!rvu->irq_name) + return -ENOMEM; + + rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec, + sizeof(bool), GFP_KERNEL); + if (!rvu->irq_allocated) + return -ENOMEM; + + /* Enable MSI-X */ + ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec, + rvu->num_vec, PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(rvu->dev, + "RVUAF: Request for %d msix vectors failed, ret %d\n", + rvu->num_vec, ret); + return ret; + } + + /* Register mailbox interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); + ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), + rvu_mbox_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox irq\n"); + goto fail; + } + + rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + + /* Enable mailbox interrupts from all PFs */ + rvu_enable_mbox_intr(rvu); + + return 0; + +fail: + pci_free_irq_vectors(rvu->pdev); + return ret; +} + +static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct rvu *rvu; + int err; + + rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL); + if (!rvu) + return -ENOMEM; + + rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL); + if (!rvu->hw) { + devm_kfree(dev, rvu); + return -ENOMEM; + } + + pci_set_drvdata(pdev, rvu); + rvu->pdev = pdev; + rvu->dev = &pdev->dev; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + goto err_freemem; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to set DMA mask\n"); + goto err_release_regions; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to set consistent DMA mask\n"); + goto err_release_regions; + } + + /* Map Admin function CSRs */ + rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0); + rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); + if (!rvu->afreg_base || !rvu->pfreg_base) { + dev_err(dev, "Unable to map admin function CSRs, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + /* Check which blocks the HW supports */ + rvu_check_block_implemented(rvu); + + rvu_reset_all_blocks(rvu); + + err = rvu_setup_hw_resources(rvu); + if (err) + goto err_release_regions; + + err = rvu_mbox_init(rvu); + if (err) + goto err_hwsetup; + + err = rvu_cgx_probe(rvu); + if (err) + goto err_mbox; + + err = rvu_register_interrupts(rvu); + if (err) + goto err_cgx; + + return 0; +err_cgx: + rvu_cgx_wq_destroy(rvu); +err_mbox: + rvu_mbox_destroy(rvu); +err_hwsetup: + 
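/* The error labels cascade: a failure at any later probe step falls
 * through every cleanup below its label, undoing setup in reverse order.
 */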
rvu_reset_all_blocks(rvu); + rvu_free_hw_resources(rvu); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_freemem: + pci_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, rvu->hw); + devm_kfree(dev, rvu); + return err; +} + +static void rvu_remove(struct pci_dev *pdev) +{ + struct rvu *rvu = pci_get_drvdata(pdev); + + rvu_unregister_interrupts(rvu); + rvu_cgx_wq_destroy(rvu); + rvu_mbox_destroy(rvu); + rvu_reset_all_blocks(rvu); + rvu_free_hw_resources(rvu); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + devm_kfree(&pdev->dev, rvu->hw); + devm_kfree(&pdev->dev, rvu); +} + +static struct pci_driver rvu_driver = { + .name = DRV_NAME, + .id_table = rvu_id_table, + .probe = rvu_probe, + .remove = rvu_remove, +}; + +static int __init rvu_init_module(void) +{ + int err; + + pr_info("%s: %s\n", DRV_NAME, DRV_STRING); + + err = pci_register_driver(&cgx_driver); + if (err < 0) + return err; + + err = pci_register_driver(&rvu_driver); + if (err < 0) + pci_unregister_driver(&cgx_driver); + + return err; +} + +static void __exit rvu_cleanup_module(void) +{ + pci_unregister_driver(&rvu_driver); + pci_unregister_driver(&cgx_driver); +} + +module_init(rvu_init_module); +module_exit(rvu_cleanup_module); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h new file mode 100644 index 000000000000..2c0580cd2807 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef RVU_H +#define RVU_H + +#include "rvu_struct.h" +#include "common.h" +#include "mbox.h" + +/* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 + +/* PCI BAR nos */ +#define PCI_AF_REG_BAR_NUM 0 +#define PCI_PF_REG_BAR_NUM 2 +#define PCI_MBOX_BAR_NUM 4 + +#define NAME_SIZE 32 + +/* PF_FUNC */ +#define RVU_PFVF_PF_SHIFT 10 +#define RVU_PFVF_PF_MASK 0x3F +#define RVU_PFVF_FUNC_SHIFT 0 +#define RVU_PFVF_FUNC_MASK 0x3FF + +struct rvu_work { + struct work_struct work; + struct rvu *rvu; +}; + +struct rsrc_bmap { + unsigned long *bmap; /* Pointer to resource bitmap */ + u16 max; /* Max resource id or count */ +}; + +struct rvu_block { + struct rsrc_bmap lf; + struct admin_queue *aq; /* NIX/NPA AQ */ + u16 *fn_map; /* LF to pcifunc mapping */ + bool multislot; + bool implemented; + u8 addr; /* RVU_BLOCK_ADDR_E */ + u8 type; /* RVU_BLOCK_TYPE_E */ + u8 lfshift; + u64 lookup_reg; + u64 pf_lfcnt_reg; + u64 vf_lfcnt_reg; + u64 lfcfg_reg; + u64 msixcfg_reg; + u64 lfreset_reg; + unsigned char name[NAME_SIZE]; +}; + +struct nix_mcast { + struct qmem *mce_ctx; + struct qmem *mcast_buf; + int replay_pkind; + int next_free_mce; + spinlock_t mce_lock; /* Serialize MCE updates */ +}; + +struct nix_mce_list { + struct hlist_head head; + int count; + int max; +}; + +struct npc_mcam { + spinlock_t lock; /* MCAM entries and counters update lock */ + u8 keysize; /* MCAM keysize 112/224/448 bits */ + u8 banks; /* Number of MCAM banks */ + u8 banks_per_entry;/* Number of keywords in key */ + u16 banksize; /* Number of MCAM entries in each bank */ + u16 total_entries; /* Total number of MCAM entries */ + u16 entries; /* Total minus reserved for NIX LFs */ + u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */ + u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */ +}; + +/* Structure for per RVU func info ie PF/VF */ +struct rvu_pfvf { + bool npalf; /* Only one NPALF per RVU_FUNC */ + bool nixlf; /* Only one NIXLF per RVU_FUNC */ + u16 sso; + u16 ssow; + u16 cptlfs; + u16 timlfs; + u8 cgx_lmac; + + /* Block LF's MSIX vector info */ + struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */ +#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF)) + u16 *msix_lfmap; /* Vector to block LF mapping */ + + /* NPA contexts */ + struct qmem *aura_ctx; + struct qmem *pool_ctx; + struct qmem *npa_qints_ctx; + unsigned long *aura_bmap; + unsigned long *pool_bmap; + + /* NIX contexts */ + struct qmem *rq_ctx; + struct qmem *sq_ctx; + struct qmem *cq_ctx; + struct qmem *rss_ctx; + struct qmem *cq_ints_ctx; + struct qmem *nix_qints_ctx; + unsigned long *sq_bmap; + unsigned long *rq_bmap; + unsigned long *cq_bmap; + + u16 rx_chan_base; + u16 tx_chan_base; + u8 rx_chan_cnt; /* total number of RX channels */ + u8 tx_chan_cnt; /* total number of TX channels */ + + u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ + + /* Broadcast pkt replication info */ + u16 bcast_mce_idx; + struct nix_mce_list bcast_mce_list; +}; + +struct nix_txsch { + struct rsrc_bmap schq; + u8 lvl; + u16 *pfvf_map; +}; + +struct npc_pkind { + struct rsrc_bmap rsrc; + u32 *pfchan_map; +}; + +struct nix_hw { + struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */ + struct nix_mcast mcast; +}; + +struct rvu_hwinfo { + u8 total_pfs; /* MAX RVU PFs HW supports */ + u16 total_vfs; /* Max RVU VFs HW supports */ + u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */ + u8 cgx; + u8 lmac_per_cgx; + u8 cgx_links; + u8 lbk_links; + u8 sdp_links; + u8 npc_kpus; /* No of parser units */ + + + 
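/* Indexed by RVU_BLOCK_ADDR_E (BLKADDR_*), not by block type;
 * e.g. hw->block[BLKADDR_NIX0] holds the first NIX block's state.
 */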
struct rvu_block block[BLK_COUNT]; /* Block info */ + struct nix_hw *nix0; + struct npc_pkind pkind; + struct npc_mcam mcam; +}; + +struct rvu { + void __iomem *afreg_base; + void __iomem *pfreg_base; + struct pci_dev *pdev; + struct device *dev; + struct rvu_hwinfo *hw; + struct rvu_pfvf *pf; + struct rvu_pfvf *hwvf; + spinlock_t rsrc_lock; /* Serialize resource alloc/free */ + + /* Mbox */ + struct otx2_mbox mbox; + struct rvu_work *mbox_wrk; + struct otx2_mbox mbox_up; + struct rvu_work *mbox_wrk_up; + struct workqueue_struct *mbox_wq; + + /* MSI-X */ + u16 num_vec; + char *irq_name; + bool *irq_allocated; + dma_addr_t msix_base_iova; + + /* CGX */ +#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ + u8 cgx_mapped_pfs; + u8 cgx_cnt; /* available cgx ports */ + u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ + u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for + * every cgx lmac port + */ + unsigned long pf_notify_bmap; /* Flags for PF notification */ + void **cgx_idmap; /* cgx id to cgx data map table */ + struct work_struct cgx_evh_work; + struct workqueue_struct *cgx_evh_wq; + spinlock_t cgx_evq_lock; /* cgx event queue lock */ + struct list_head cgx_evq_head; /* cgx event queue head */ +}; + +static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) +{ + writeq(val, rvu->afreg_base + ((block << 28) | offset)); +} + +static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset) +{ + return readq(rvu->afreg_base + ((block << 28) | offset)); +} + +static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val) +{ + writeq(val, rvu->pfreg_base + offset); +} + +static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) +{ + return readq(rvu->pfreg_base + offset); +} + +/* Function Prototypes + * RVU + */ +int rvu_alloc_bitmap(struct rsrc_bmap *rsrc); +int rvu_alloc_rsrc(struct rsrc_bmap *rsrc); +void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); +int rvu_rsrc_free_count(struct rsrc_bmap *rsrc); +int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); +bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); +int rvu_get_pf(u16 pcifunc); +struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); +void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); +bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); +int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot); +int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); +int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); +int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); + +/* RVU HW reg validation */ +enum regmap_block { + TXSCHQ_HWREGMAP = 0, + MAX_HWREGMAP, +}; + +bool rvu_check_valid_reg(int regmap, int regblk, u64 reg); + +/* NPA/NIX AQ APIs */ +int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, + int qsize, int inst_size, int res_size); +void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); + +/* CGX APIs */ +static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) +{ + return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs); +} + +static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) +{ + *cgx_id = (map >> 4) & 0xF; + *lmac_id = (map & 0xF); +} + +int rvu_cgx_probe(struct rvu *rvu); +void rvu_cgx_wq_destroy(struct rvu *rvu); +void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu); +int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start); +int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req, + struct msg_rsp 
*rsp); +int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req, + struct cgx_stats_rsp *rsp); +int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu, + struct cgx_mac_addr_set_or_get *req, + struct cgx_mac_addr_set_or_get *rsp); +int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu, + struct cgx_mac_addr_set_or_get *req, + struct cgx_mac_addr_set_or_get *rsp); +int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req, + struct cgx_link_info_msg *rsp); +int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); + +/* NPA APIs */ +int rvu_npa_init(struct rvu *rvu); +void rvu_npa_freemem(struct rvu *rvu); +int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu, + struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp); +int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu, + struct hwctx_disable_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu, + struct npa_lf_alloc_req *req, + struct npa_lf_alloc_rsp *rsp); +int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); + +/* NIX APIs */ +int rvu_nix_init(struct rvu *rvu); +void rvu_nix_freemem(struct rvu *rvu); +int rvu_get_nixlf_count(struct rvu *rvu); +int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, + struct nix_lf_alloc_req *req, + struct nix_lf_alloc_rsp *rsp); +int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu, + struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp); +int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu, + struct hwctx_disable_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, + struct nix_txsch_alloc_req *req, + struct nix_txsch_alloc_rsp *rsp); +int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu, + struct nix_txsch_free_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, + struct nix_txschq_config *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu, + struct nix_vtag_config *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu, + struct nix_rss_flowkey_cfg *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu, + struct nix_set_mac_addr *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req, + struct msg_rsp *rsp); + +/* NPC APIs */ +int rvu_npc_init(struct rvu *rvu); +void rvu_npc_freemem(struct rvu *rvu); +int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); +void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf); +void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan, u8 *mac_addr); +void 
rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan, bool allmulti); +void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan); +void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, + int group, int alg_idx, int mcam_index); +#endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c new file mode 100644 index 000000000000..188185c15b4a --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu.h" +#include "cgx.h" + +struct cgx_evq_entry { + struct list_head evq_node; + struct cgx_link_event link_event; +}; + +#define M(_name, _id, _req_type, _rsp_type) \ +static struct _req_type __maybe_unused \ +*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \ +{ \ + struct _req_type *req; \ + \ + req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ + &rvu->mbox_up, devid, sizeof(struct _req_type), \ + sizeof(struct _rsp_type)); \ + if (!req) \ + return NULL; \ + req->hdr.sig = OTX2_MBOX_REQ_SIG; \ + req->hdr.id = _id; \ + return req; \ +} + +MBOX_UP_CGX_MESSAGES +#undef M + +/* Returns bitmap of mapped PFs */ +static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) +{ + return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; +} + +static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id) +{ + return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF); +} + +void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) +{ + if (cgx_id >= rvu->cgx_cnt) + return NULL; + + return rvu->cgx_idmap[cgx_id]; +} + +static int rvu_map_cgx_lmac_pf(struct rvu *rvu) +{ + struct npc_pkind *pkind = &rvu->hw->pkind; + int cgx_cnt = rvu->cgx_cnt; + int cgx, lmac_cnt, lmac; + int pf = PF_CGXMAP_BASE; + int size, free_pkind; + + if (!cgx_cnt) + return 0; + + if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF) + return -EINVAL; + + /* Alloc map table + * An additional entry is required since PF id starts from 1 and + * hence entry at offset 0 is invalid. 
+ */ + size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8); + rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL); + if (!rvu->pf2cgxlmac_map) + return -ENOMEM; + + /* Initialize offset 0 with an invalid cgx and lmac id */ + rvu->pf2cgxlmac_map[0] = 0xFF; + + /* Reverse map table */ + rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev, + cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16), + GFP_KERNEL); + if (!rvu->cgxlmac2pf_map) + return -ENOMEM; + + rvu->cgx_mapped_pfs = 0; + for (cgx = 0; cgx < cgx_cnt; cgx++) { + lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) { + rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); + rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; + free_pkind = rvu_alloc_rsrc(&pkind->rsrc); + pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; + rvu->cgx_mapped_pfs++; + } + } + return 0; +} + +static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu) +{ + struct cgx_evq_entry *qentry; + unsigned long flags; + int err; + + qentry = kmalloc(sizeof(*qentry), GFP_KERNEL); + if (!qentry) + return -ENOMEM; + + /* Lock the event queue before we read the local link status */ + spin_lock_irqsave(&rvu->cgx_evq_lock, flags); + err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + &qentry->link_event.link_uinfo); + qentry->link_event.cgx_id = cgx_id; + qentry->link_event.lmac_id = lmac_id; + if (err) + goto skip_add; + list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); +skip_add: + spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); + + /* start worker to process the events */ + queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); + + return 0; +} + +/* This is called from interrupt context and is expected to be atomic */ +static int cgx_lmac_postevent(struct cgx_link_event *event, void *data) +{ + struct cgx_evq_entry *qentry; + struct rvu *rvu = data; + + /* post event to the event queue */ + qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); + if (!qentry) + return -ENOMEM; + qentry->link_event = *event; + spin_lock(&rvu->cgx_evq_lock); + list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); + spin_unlock(&rvu->cgx_evq_lock); + + /* start worker to process the events */ + queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); + + return 0; +} + +static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) +{ + struct cgx_link_user_info *linfo; + struct cgx_link_info_msg *msg; + unsigned long pfmap; + int err, pfid; + + linfo = &event->link_uinfo; + pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); + + do { + pfid = find_first_bit(&pfmap, 16); + clear_bit(pfid, &pfmap); + + /* check if notification is enabled */ + if (!test_bit(pfid, &rvu->pf_notify_bmap)) { + dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n", + event->cgx_id, event->lmac_id, + linfo->link_up ? 
"UP" : "DOWN"); + continue; + } + + /* Send mbox message to PF */ + msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid); + if (!msg) + continue; + msg->link_info = *linfo; + otx2_mbox_msg_send(&rvu->mbox_up, pfid); + err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid); + if (err) + dev_warn(rvu->dev, "notification to pf %d failed\n", + pfid); + } while (pfmap); +} + +static void cgx_evhandler_task(struct work_struct *work) +{ + struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work); + struct cgx_evq_entry *qentry; + struct cgx_link_event *event; + unsigned long flags; + + do { + /* Dequeue an event */ + spin_lock_irqsave(&rvu->cgx_evq_lock, flags); + qentry = list_first_entry_or_null(&rvu->cgx_evq_head, + struct cgx_evq_entry, + evq_node); + if (qentry) + list_del(&qentry->evq_node); + spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); + if (!qentry) + break; /* nothing more to process */ + + event = &qentry->link_event; + + /* process event */ + cgx_notify_pfs(event, rvu); + kfree(qentry); + } while (1); +} + +static void cgx_lmac_event_handler_init(struct rvu *rvu) +{ + struct cgx_event_cb cb; + int cgx, lmac, err; + void *cgxd; + + spin_lock_init(&rvu->cgx_evq_lock); + INIT_LIST_HEAD(&rvu->cgx_evq_head); + INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task); + rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0); + if (!rvu->cgx_evh_wq) { + dev_err(rvu->dev, "alloc workqueue failed"); + return; + } + + cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */ + cb.data = rvu; + + for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) { + err = cgx_lmac_evh_register(&cb, cgxd, lmac); + if (err) + dev_err(rvu->dev, + "%d:%d handler register failed\n", + cgx, lmac); + } + } +} + +void rvu_cgx_wq_destroy(struct rvu *rvu) +{ + if (rvu->cgx_evh_wq) { + flush_workqueue(rvu->cgx_evh_wq); + destroy_workqueue(rvu->cgx_evh_wq); + rvu->cgx_evh_wq = NULL; + } +} + +int rvu_cgx_probe(struct rvu *rvu) +{ + int i, err; + + /* find available cgx ports */ + rvu->cgx_cnt = cgx_get_cgx_cnt(); + if (!rvu->cgx_cnt) { + dev_info(rvu->dev, "No CGX devices found!\n"); + return -ENODEV; + } + + rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *), + GFP_KERNEL); + if (!rvu->cgx_idmap) + return -ENOMEM; + + /* Initialize the cgxdata table */ + for (i = 0; i < rvu->cgx_cnt; i++) + rvu->cgx_idmap[i] = cgx_get_pdata(i); + + /* Map CGX LMAC interfaces to RVU PFs */ + err = rvu_map_cgx_lmac_pf(rvu); + if (err) + return err; + + /* Register for CGX events */ + cgx_lmac_event_handler_init(rvu); + return 0; +} + +int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + + /* This msg is expected only from PFs that are mapped to CGX LMACs, + * if received from other PF/VF simply ACK, nothing to do. 
+ */ + if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start); + + return 0; +} + +int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true); + return 0; +} + +int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false); + return 0; +} + +int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req, + struct cgx_stats_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + int stat = 0, err = 0; + u64 tx_stat, rx_stat; + u8 cgx_idx, lmac; + void *cgxd; + + if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) || + !is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); + cgxd = rvu_cgx_pdata(cgx_idx, rvu); + + /* Rx stats */ + while (stat < CGX_RX_STATS_COUNT) { + err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat); + if (err) + return err; + rsp->rx_stats[stat] = rx_stat; + stat++; + } + + /* Tx stats */ + stat = 0; + while (stat < CGX_TX_STATS_COUNT) { + err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat); + if (err) + return err; + rsp->tx_stats[stat] = tx_stat; + stat++; + } + return 0; +} + +int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu, + struct cgx_mac_addr_set_or_get *req, + struct cgx_mac_addr_set_or_get *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr); + + return 0; +} + +int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu, + struct cgx_mac_addr_set_or_get *req, + struct cgx_mac_addr_set_or_get *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + int rc = 0, i; + u64 cfg; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + rsp->hdr.rc = rc; + cfg = cgx_lmac_addr_get(cgx_id, lmac_id); + /* copy 48 bit mac address to req->mac_addr */ + for (i = 0; i < ETH_ALEN; i++) + rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8; + return 0; +} + +int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + + /* This msg is expected only from PFs that are mapped to CGX LMACs, + * if received from other PF/VF simply ACK, nothing to do. + */ + if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) || + !is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + cgx_lmac_promisc_config(cgx_id, lmac_id, true); + return 0; +} + +int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + + /* This msg is expected only from PFs that are mapped to CGX LMACs, + * if received from other PF/VF simply ACK, nothing to do. 
+ */ + if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) || + !is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + cgx_lmac_promisc_config(cgx_id, lmac_id, false); + return 0; +} + +static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + + /* This msg is expected only from PFs that are mapped to CGX LMACs, + * if received from other PF/VF simply ACK, nothing to do. + */ + if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + if (en) { + set_bit(pf, &rvu->pf_notify_bmap); + /* Send the current link status to PF */ + rvu_cgx_send_link_info(cgx_id, lmac_id, rvu); + } else { + clear_bit(pf, &rvu->pf_notify_bmap); + } + + return 0; +} + +int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true); + return 0; +} + +int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false); + return 0; +} + +int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req, + struct cgx_link_info_msg *rsp) +{ + u8 cgx_id, lmac_id; + int pf, err; + + pf = rvu_get_pf(req->hdr.pcifunc); + + if (!is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + &rsp->link_info); + return err; +} + +static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + + /* This msg is expected only from PFs that are mapped to CGX LMACs, + * if received from other PF/VF simply ACK, nothing to do. + */ + if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf)) + return -ENODEV; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, en); +} + +int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true); + return 0; +} + +int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false); + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c new file mode 100644 index 000000000000..8890c95831ca --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -0,0 +1,1959 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "rvu.h" +#include "npc.h" +#include "cgx.h" + +static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add); + +enum mc_tbl_sz { + MC_TBL_SZ_256, + MC_TBL_SZ_512, + MC_TBL_SZ_1K, + MC_TBL_SZ_2K, + MC_TBL_SZ_4K, + MC_TBL_SZ_8K, + MC_TBL_SZ_16K, + MC_TBL_SZ_32K, + MC_TBL_SZ_64K, +}; + +enum mc_buf_cnt { + MC_BUF_CNT_8, + MC_BUF_CNT_16, + MC_BUF_CNT_32, + MC_BUF_CNT_64, + MC_BUF_CNT_128, + MC_BUF_CNT_256, + MC_BUF_CNT_512, + MC_BUF_CNT_1024, + MC_BUF_CNT_2048, +}; + +/* For now considering MC resources needed for broadcast + * pkt replication only. i.e 256 HWVFs + 12 PFs. + */ +#define MC_TBL_SIZE MC_TBL_SZ_512 +#define MC_BUF_CNT MC_BUF_CNT_128 + +struct mce { + struct hlist_node node; + u16 idx; + u16 pcifunc; +}; + +int rvu_get_nixlf_count(struct rvu *rvu) +{ + struct rvu_block *block; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return 0; + block = &rvu->hw->block[blkaddr]; + return block->lf.max; +} + +static void nix_mce_list_init(struct nix_mce_list *list, int max) +{ + INIT_HLIST_HEAD(&list->head); + list->count = 0; + list->max = max; +} + +static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) +{ + int idx; + + if (!mcast) + return 0; + + idx = mcast->next_free_mce; + mcast->next_free_mce += count; + return idx; +} + +static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) +{ + if (blkaddr == BLKADDR_NIX0 && hw->nix0) + return hw->nix0; + + return NULL; +} + +static bool is_valid_txschq(struct rvu *rvu, int blkaddr, + int lvl, u16 pcifunc, u16 schq) +{ + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return false; + + txsch = &nix_hw->txsch[lvl]; + /* Check out of bounds */ + if (schq >= txsch->schq.max) + return false; + + spin_lock(&rvu->rsrc_lock); + if (txsch->pfvf_map[schq] != pcifunc) { + spin_unlock(&rvu->rsrc_lock); + return false; + } + spin_unlock(&rvu->rsrc_lock); + return true; +} + +static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u8 cgx_id, lmac_id; + int pkind, pf; + int err; + + pf = rvu_get_pf(pcifunc); + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + return 0; + + switch (type) { + case NIX_INTF_TYPE_CGX: + pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; + rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); + + pkind = rvu_npc_get_pkind(rvu, pf); + if (pkind < 0) { + dev_err(rvu->dev, + "PF_Func 0x%x: Invalid pkind\n", pcifunc); + return -EINVAL; + } + pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0); + pfvf->tx_chan_base = pfvf->rx_chan_base; + pfvf->rx_chan_cnt = 1; + pfvf->tx_chan_cnt = 1; + cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); + rvu_npc_set_pkind(rvu, pkind, pfvf); + break; + case NIX_INTF_TYPE_LBK: + break; + } + + /* Add a UCAST forwarding rule in MCAM with this NIXLF attached + * RVU PF/VF's MAC address. 
+ */ + rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, pfvf->mac_addr); + + /* Add this PF_FUNC to bcast pkt replication list */ + err = nix_update_bcast_mce_list(rvu, pcifunc, true); + if (err) { + dev_err(rvu->dev, + "Bcast list, failed to enable PF_FUNC 0x%x\n", + pcifunc); + return err; + } + + rvu_npc_install_bcast_match_entry(rvu, pcifunc, + nixlf, pfvf->rx_chan_base); + + return 0; +} + +static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) +{ + int err; + + /* Remove this PF_FUNC from bcast pkt replication list */ + err = nix_update_bcast_mce_list(rvu, pcifunc, false); + if (err) { + dev_err(rvu->dev, + "Bcast list, failed to disable PF_FUNC 0x%x\n", + pcifunc); + } + + /* Free and disable any MCAM entries used by this NIX LF */ + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); +} + +static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, + u64 format, bool v4, u64 *fidx) +{ + struct nix_lso_format field = {0}; + + /* IP's Length field */ + field.layer = NIX_TXLAYER_OL3; + /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ + field.offset = v4 ? 2 : 4; + field.sizem1 = 1; /* i.e 2 bytes */ + field.alg = NIX_LSOALG_ADD_PAYLEN; + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), + *(u64 *)&field); + + /* No ID field in IPv6 header */ + if (!v4) + return; + + /* IP's ID field */ + field.layer = NIX_TXLAYER_OL3; + field.offset = 4; + field.sizem1 = 1; /* i.e 2 bytes */ + field.alg = NIX_LSOALG_ADD_SEGNUM; + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), + *(u64 *)&field); +} + +static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, + u64 format, u64 *fidx) +{ + struct nix_lso_format field = {0}; + + /* TCP's sequence number field */ + field.layer = NIX_TXLAYER_OL4; + field.offset = 4; + field.sizem1 = 3; /* i.e 4 bytes */ + field.alg = NIX_LSOALG_ADD_OFFSET; + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), + *(u64 *)&field); + + /* TCP's flags field */ + field.layer = NIX_TXLAYER_OL4; + field.offset = 12; + field.sizem1 = 0; /* not needed */ + field.alg = NIX_LSOALG_TCP_FLAGS; + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), + *(u64 *)&field); +} + +static void nix_setup_lso(struct rvu *rvu, int blkaddr) +{ + u64 cfg, idx, fidx = 0; + + /* Enable LSO */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); + /* For TSO, set first and middle segment flags to + * mask out PSH, RST & FIN flags in TCP packet + */ + cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); + cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); + rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); + + /* Configure format fields for TCPv4 segmentation offload */ + idx = NIX_LSO_FORMAT_IDX_TSOV4; + nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); + nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); + + /* Set rest of the fields to NOP */ + for (; fidx < 8; fidx++) { + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); + } + + /* Configure format fields for TCPv6 segmentation offload */ + idx = NIX_LSO_FORMAT_IDX_TSOV6; + fidx = 0; + nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx); + nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); + + /* Set rest of the fields to NOP */ + for (; fidx < 8; fidx++) { + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); + } +} + +static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) +{ + kfree(pfvf->rq_bmap); + kfree(pfvf->sq_bmap); + 
kfree(pfvf->cq_bmap); + if (pfvf->rq_ctx) + qmem_free(rvu->dev, pfvf->rq_ctx); + if (pfvf->sq_ctx) + qmem_free(rvu->dev, pfvf->sq_ctx); + if (pfvf->cq_ctx) + qmem_free(rvu->dev, pfvf->cq_ctx); + if (pfvf->rss_ctx) + qmem_free(rvu->dev, pfvf->rss_ctx); + if (pfvf->nix_qints_ctx) + qmem_free(rvu->dev, pfvf->nix_qints_ctx); + if (pfvf->cq_ints_ctx) + qmem_free(rvu->dev, pfvf->cq_ints_ctx); + + pfvf->rq_bmap = NULL; + pfvf->cq_bmap = NULL; + pfvf->sq_bmap = NULL; + pfvf->rq_ctx = NULL; + pfvf->sq_ctx = NULL; + pfvf->cq_ctx = NULL; + pfvf->rss_ctx = NULL; + pfvf->nix_qints_ctx = NULL; + pfvf->cq_ints_ctx = NULL; +} + +static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, + struct rvu_pfvf *pfvf, int nixlf, + int rss_sz, int rss_grps, int hwctx_size) +{ + int err, grp, num_indices; + + /* RSS is not requested for this NIXLF */ + if (!rss_sz) + return 0; + num_indices = rss_sz * rss_grps; + + /* Alloc NIX RSS HW context memory and config the base */ + err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); + if (err) + return err; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), + (u64)pfvf->rss_ctx->iova); + + /* Config full RSS table size, enable RSS and caching */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), + BIT_ULL(36) | BIT_ULL(4) | + ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE)); + /* Config RSS group offset and sizes */ + for (grp = 0; grp < rss_grps; grp++) + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), + ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); + return 0; +} + +static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, + struct nix_aq_inst_s *inst) +{ + struct admin_queue *aq = block->aq; + struct nix_aq_res_s *result; + int timeout = 1000; + u64 reg, head; + + result = (struct nix_aq_res_s *)aq->res->base; + + /* Get current head pointer where to append this instruction */ + reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); + head = (reg >> 4) & AQ_PTR_MASK; + + memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), + (void *)inst, aq->inst->entry_sz); + memset(result, 0, sizeof(*result)); + /* sync into memory */ + wmb(); + + /* Ring the doorbell and wait for result */ + rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); + while (result->compcode == NIX_AQ_COMP_NOTDONE) { + cpu_relax(); + udelay(1); + timeout--; + if (!timeout) + return -EBUSY; + } + + if (result->compcode != NIX_AQ_COMP_GOOD) + /* TODO: Replace this with some error code */ + return -EBUSY; + + return 0; +} + +static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int nixlf, blkaddr, rc = 0; + struct nix_aq_inst_s inst; + struct rvu_block *block; + struct admin_queue *aq; + struct rvu_pfvf *pfvf; + void *ctx, *mask; + bool ena; + u64 cfg; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + aq = block->aq; + if (!aq) { + dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); + return NIX_AF_ERR_AQ_ENQUEUE; + } + + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + switch (req->ctype) { + case NIX_AQ_CTYPE_RQ: + /* Check if index exceeds max no of queues */ + if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + case NIX_AQ_CTYPE_SQ: + if (!pfvf->sq_ctx || req->qidx >= 
pfvf->sq_ctx->qsize) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + case NIX_AQ_CTYPE_CQ: + if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + case NIX_AQ_CTYPE_RSS: + /* Check if RSS is enabled and qidx is within range */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); + if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || + (req->qidx >= (256UL << (cfg & 0xF)))) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + case NIX_AQ_CTYPE_MCE: + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); + /* Check if index exceeds MCE list length */ + if (!hw->nix0->mcast.mce_ctx || + (req->qidx >= (256UL << (cfg & 0xF)))) + rc = NIX_AF_ERR_AQ_ENQUEUE; + + /* Adding multicast lists for requests from PF/VFs is not + * yet supported, so ignore this. + */ + if (rsp) + rc = NIX_AF_ERR_AQ_ENQUEUE; + break; + default: + rc = NIX_AF_ERR_AQ_ENQUEUE; + } + + if (rc) + return rc; + + /* Check if SQ pointed SMQ belongs to this PF/VF or not */ + if (req->ctype == NIX_AQ_CTYPE_SQ && + req->op != NIX_AQ_INSTOP_WRITE) { + if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, + pcifunc, req->sq.smq)) + return NIX_AF_ERR_AQ_ENQUEUE; + } + + memset(&inst, 0, sizeof(struct nix_aq_inst_s)); + inst.lf = nixlf; + inst.cindex = req->qidx; + inst.ctype = req->ctype; + inst.op = req->op; + /* Currently we are not supporting enqueuing multiple instructions, + * so always choose first entry in result memory. + */ + inst.res_addr = (u64)aq->res->iova; + + /* Clean result + context memory */ + memset(aq->res->base, 0, aq->res->entry_sz); + /* Context needs to be written at RES_ADDR + 128 */ + ctx = aq->res->base + 128; + /* Mask needs to be written at RES_ADDR + 256 */ + mask = aq->res->base + 256; + + switch (req->op) { + case NIX_AQ_INSTOP_WRITE: + if (req->ctype == NIX_AQ_CTYPE_RQ) + memcpy(mask, &req->rq_mask, + sizeof(struct nix_rq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_SQ) + memcpy(mask, &req->sq_mask, + sizeof(struct nix_sq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_CQ) + memcpy(mask, &req->cq_mask, + sizeof(struct nix_cq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_RSS) + memcpy(mask, &req->rss_mask, + sizeof(struct nix_rsse_s)); + else if (req->ctype == NIX_AQ_CTYPE_MCE) + memcpy(mask, &req->mce_mask, + sizeof(struct nix_rx_mce_s)); + /* Fall through */ + case NIX_AQ_INSTOP_INIT: + if (req->ctype == NIX_AQ_CTYPE_RQ) + memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_SQ) + memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_CQ) + memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_RSS) + memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); + else if (req->ctype == NIX_AQ_CTYPE_MCE) + memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); + break; + case NIX_AQ_INSTOP_NOP: + case NIX_AQ_INSTOP_READ: + case NIX_AQ_INSTOP_LOCK: + case NIX_AQ_INSTOP_UNLOCK: + break; + default: + rc = NIX_AF_ERR_AQ_ENQUEUE; + return rc; + } + + spin_lock(&aq->lock); + + /* Submit the instruction to AQ */ + rc = nix_aq_enqueue_wait(rvu, block, &inst); + if (rc) { + spin_unlock(&aq->lock); + return rc; + } + + /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ + if (req->op == NIX_AQ_INSTOP_INIT) { + if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) + __set_bit(req->qidx, pfvf->rq_bmap); + if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) + __set_bit(req->qidx, pfvf->sq_bmap); + if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) + __set_bit(req->qidx, pfvf->cq_bmap); + } + 
+ if (req->op == NIX_AQ_INSTOP_WRITE) { + if (req->ctype == NIX_AQ_CTYPE_RQ) { + ena = (req->rq.ena & req->rq_mask.ena) | + (test_bit(req->qidx, pfvf->rq_bmap) & + ~req->rq_mask.ena); + if (ena) + __set_bit(req->qidx, pfvf->rq_bmap); + else + __clear_bit(req->qidx, pfvf->rq_bmap); + } + if (req->ctype == NIX_AQ_CTYPE_SQ) { + ena = (req->rq.ena & req->sq_mask.ena) | + (test_bit(req->qidx, pfvf->sq_bmap) & + ~req->sq_mask.ena); + if (ena) + __set_bit(req->qidx, pfvf->sq_bmap); + else + __clear_bit(req->qidx, pfvf->sq_bmap); + } + if (req->ctype == NIX_AQ_CTYPE_CQ) { + ena = (req->rq.ena & req->cq_mask.ena) | + (test_bit(req->qidx, pfvf->cq_bmap) & + ~req->cq_mask.ena); + if (ena) + __set_bit(req->qidx, pfvf->cq_bmap); + else + __clear_bit(req->qidx, pfvf->cq_bmap); + } + } + + if (rsp) { + /* Copy read context into mailbox */ + if (req->op == NIX_AQ_INSTOP_READ) { + if (req->ctype == NIX_AQ_CTYPE_RQ) + memcpy(&rsp->rq, ctx, + sizeof(struct nix_rq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_SQ) + memcpy(&rsp->sq, ctx, + sizeof(struct nix_sq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_CQ) + memcpy(&rsp->cq, ctx, + sizeof(struct nix_cq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_RSS) + memcpy(&rsp->rss, ctx, + sizeof(struct nix_cq_ctx_s)); + else if (req->ctype == NIX_AQ_CTYPE_MCE) + memcpy(&rsp->mce, ctx, + sizeof(struct nix_rx_mce_s)); + } + } + + spin_unlock(&aq->lock); + return 0; +} + +static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + struct nix_aq_enq_req aq_req; + unsigned long *bmap; + int qidx, q_cnt = 0; + int err = 0, rc; + + if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) + return NIX_AF_ERR_AQ_ENQUEUE; + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = req->hdr.pcifunc; + + if (req->ctype == NIX_AQ_CTYPE_CQ) { + aq_req.cq.ena = 0; + aq_req.cq_mask.ena = 1; + q_cnt = pfvf->cq_ctx->qsize; + bmap = pfvf->cq_bmap; + } + if (req->ctype == NIX_AQ_CTYPE_SQ) { + aq_req.sq.ena = 0; + aq_req.sq_mask.ena = 1; + q_cnt = pfvf->sq_ctx->qsize; + bmap = pfvf->sq_bmap; + } + if (req->ctype == NIX_AQ_CTYPE_RQ) { + aq_req.rq.ena = 0; + aq_req.rq_mask.ena = 1; + q_cnt = pfvf->rq_ctx->qsize; + bmap = pfvf->rq_bmap; + } + + aq_req.ctype = req->ctype; + aq_req.op = NIX_AQ_INSTOP_WRITE; + + for (qidx = 0; qidx < q_cnt; qidx++) { + if (!test_bit(qidx, bmap)) + continue; + aq_req.qidx = qidx; + rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); + if (rc) { + err = rc; + dev_err(rvu->dev, "Failed to disable %s:%d context\n", + (req->ctype == NIX_AQ_CTYPE_CQ) ? + "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ? 
+ "RQ" : "SQ"), qidx); + } + } + + return err; +} + +int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu, + struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + return rvu_nix_aq_enq_inst(rvu, req, rsp); +} + +int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu, + struct hwctx_disable_req *req, + struct msg_rsp *rsp) +{ + return nix_lf_hwctx_disable(rvu, req); +} + +int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, + struct nix_lf_alloc_req *req, + struct nix_lf_alloc_rsp *rsp) +{ + int nixlf, qints, hwctx_size, err, rc = 0; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + u64 cfg, ctx_cfg; + int blkaddr; + + if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) + return NIX_AF_ERR_PARAM; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + /* If RSS is being enabled, check if requested config is valid. + * RSS table size should be power of two, otherwise + * RSS_GRP::OFFSET + adder might go beyond that group or + * won't be able to use entire table. + */ + if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || + !is_power_of_2(req->rss_sz))) + return NIX_AF_ERR_RSS_SIZE_INVALID; + + if (req->rss_sz && + (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) + return NIX_AF_ERR_RSS_GRPS_INVALID; + + /* Reset this NIX LF */ + err = rvu_lf_reset(rvu, block, nixlf); + if (err) { + dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", + block->addr - BLKADDR_NIX0, nixlf); + return NIX_AF_ERR_LF_RESET; + } + + ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); + + /* Alloc NIX RQ HW context memory and config the base */ + hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); + if (err) + goto free_mem; + + pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); + if (!pfvf->rq_bmap) + goto free_mem; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), + (u64)pfvf->rq_ctx->iova); + + /* Set caching and queue count in HW */ + cfg = BIT_ULL(36) | (req->rq_cnt - 1); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); + + /* Alloc NIX SQ HW context memory and config the base */ + hwctx_size = 1UL << (ctx_cfg & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); + if (err) + goto free_mem; + + pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); + if (!pfvf->sq_bmap) + goto free_mem; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), + (u64)pfvf->sq_ctx->iova); + cfg = BIT_ULL(36) | (req->sq_cnt - 1); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); + + /* Alloc NIX CQ HW context memory and config the base */ + hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); + if (err) + goto free_mem; + + pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); + if (!pfvf->cq_bmap) + goto free_mem; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), + (u64)pfvf->cq_ctx->iova); + cfg = BIT_ULL(36) | (req->cq_cnt - 1); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); + + /* Initialize receive side scaling (RSS) */ + hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); + err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, + req->rss_sz, req->rss_grps, hwctx_size); + if (err) + goto 
free_mem; + + /* Alloc memory for CQINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 24) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); + if (err) + goto free_mem; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), + (u64)pfvf->cq_ints_ctx->iova); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36)); + + /* Alloc memory for QINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 12) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); + if (err) + goto free_mem; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), + (u64)pfvf->nix_qints_ctx->iova); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36)); + + /* Enable LMTST for this NIX LF */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); + + /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC + * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's + * PCIFUNC itself. + */ + if (req->npa_func == RVU_DEFAULT_PF_FUNC) + cfg = pcifunc; + else + cfg = req->npa_func; + + if (req->sso_func == RVU_DEFAULT_PF_FUNC) + cfg |= (u64)pcifunc << 16; + else + cfg |= (u64)req->sso_func << 16; + + cfg |= (u64)req->xqe_sz << 33; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); + + /* Config Rx pkt length, csum checks and apad enable / disable */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); + + err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf); + if (err) + goto free_mem; + + goto exit; + +free_mem: + nix_ctx_free(rvu, pfvf); + rc = -ENOMEM; + +exit: + /* Set macaddr of this PF/VF */ + ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); + + /* set SQB size info */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); + rsp->sqb_size = (cfg >> 34) & 0xFFFF; + rsp->rx_chan_base = pfvf->rx_chan_base; + rsp->tx_chan_base = pfvf->tx_chan_base; + rsp->rx_chan_cnt = pfvf->rx_chan_cnt; + rsp->tx_chan_cnt = pfvf->tx_chan_cnt; + rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; + rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; + return rc; +} + +int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + int blkaddr, nixlf, err; + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_interface_deinit(rvu, pcifunc, nixlf); + + /* Reset this NIX LF */ + err = rvu_lf_reset(rvu, block, nixlf); + if (err) { + dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", + block->addr - BLKADDR_NIX0, nixlf); + return NIX_AF_ERR_LF_RESET; + } + + nix_ctx_free(rvu, pfvf); + + return 0; +} + +/* Disable shaping of pkts by a scheduler queue + * at a given scheduler level. 
+ */ +static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, + int lvl, int schq) +{ + u64 cir_reg = 0, pir_reg = 0; + u64 cfg; + + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + cir_reg = NIX_AF_TL1X_CIR(schq); + pir_reg = 0; /* PIR not available at TL1 */ + break; + case NIX_TXSCH_LVL_TL2: + cir_reg = NIX_AF_TL2X_CIR(schq); + pir_reg = NIX_AF_TL2X_PIR(schq); + break; + case NIX_TXSCH_LVL_TL3: + cir_reg = NIX_AF_TL3X_CIR(schq); + pir_reg = NIX_AF_TL3X_PIR(schq); + break; + case NIX_TXSCH_LVL_TL4: + cir_reg = NIX_AF_TL4X_CIR(schq); + pir_reg = NIX_AF_TL4X_PIR(schq); + break; + } + + if (!cir_reg) + return; + cfg = rvu_read64(rvu, blkaddr, cir_reg); + rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); + + if (!pir_reg) + return; + cfg = rvu_read64(rvu, blkaddr, pir_reg); + rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); +} + +static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, + int lvl, int schq) +{ + struct rvu_hwinfo *hw = rvu->hw; + int link; + + /* Reset TL4's SDP link config */ + if (lvl == NIX_TXSCH_LVL_TL4) + rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); + + if (lvl != NIX_TXSCH_LVL_TL2) + return; + + /* Reset TL2's CGX or LBK link config */ + for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); +} + +int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, + struct nix_txsch_alloc_req *req, + struct nix_txsch_alloc_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct nix_txsch *txsch; + int lvl, idx, req_schq; + struct rvu_pfvf *pfvf; + struct nix_hw *nix_hw; + int blkaddr, rc = 0; + u16 schq; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + spin_lock(&rvu->rsrc_lock); + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + req_schq = req->schq_contig[lvl] + req->schq[lvl]; + + /* There are only 28 TL1s */ + if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max) + goto err; + + /* Check if request is valid */ + if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) + goto err; + + /* If contiguous queues are needed, check for availability */ + if (req->schq_contig[lvl] && + !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) + goto err; + + /* Check if full request can be accommodated */ + if (req_schq >= rvu_rsrc_free_count(&txsch->schq)) + goto err; + } + + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + rsp->schq_contig[lvl] = req->schq_contig[lvl]; + rsp->schq[lvl] = req->schq[lvl]; + + schq = 0; + /* Alloc contiguous queues first */ + if (req->schq_contig[lvl]) { + schq = rvu_alloc_rsrc_contig(&txsch->schq, + req->schq_contig[lvl]); + + for (idx = 0; idx < req->schq_contig[lvl]; idx++) { + txsch->pfvf_map[schq] = pcifunc; + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + rsp->schq_contig_list[lvl][idx] = schq; + schq++; + } + } + + /* Alloc non-contiguous queues */ + for (idx = 0; idx < req->schq[lvl]; idx++) { + schq = rvu_alloc_rsrc(&txsch->schq); + txsch->pfvf_map[schq] = pcifunc; + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + rsp->schq_list[lvl][idx] = schq; + } + } + goto exit; +err: + rc = NIX_AF_ERR_TLX_ALLOC_FAIL; +exit: + spin_unlock(&rvu->rsrc_lock); + return rc; +} + +static 
int nix_txschq_free(struct rvu *rvu, u16 pcifunc) +{ + int blkaddr, nixlf, lvl, schq, err; + struct rvu_hwinfo *hw = rvu->hw; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + /* Disable TL2/3 queue links before SMQ flush*/ + spin_lock(&rvu->rsrc_lock); + for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) + continue; + + txsch = &nix_hw->txsch[lvl]; + for (schq = 0; schq < txsch->schq.max; schq++) { + if (txsch->pfvf_map[schq] != pcifunc) + continue; + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + } + } + + /* Flush SMQs */ + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + for (schq = 0; schq < txsch->schq.max; schq++) { + if (txsch->pfvf_map[schq] != pcifunc) + continue; + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); + /* Do SMQ flush and set enqueue xoff */ + cfg |= BIT_ULL(50) | BIT_ULL(49); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); + + /* Wait for flush to complete */ + err = rvu_poll_reg(rvu, blkaddr, + NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true); + if (err) { + dev_err(rvu->dev, + "NIXLF%d: SMQ%d flush failed\n", nixlf, schq); + } + } + + /* Now free scheduler queues to free pool */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + for (schq = 0; schq < txsch->schq.max; schq++) { + if (txsch->pfvf_map[schq] != pcifunc) + continue; + rvu_free_rsrc(&txsch->schq, schq); + txsch->pfvf_map[schq] = 0; + } + } + spin_unlock(&rvu->rsrc_lock); + + /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ + rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); + err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); + if (err) + dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); + + return 0; +} + +int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu, + struct nix_txsch_free_req *req, + struct msg_rsp *rsp) +{ + return nix_txschq_free(rvu, req->hdr.pcifunc); +} + +static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, + int lvl, u64 reg, u64 regval) +{ + u64 regbase = reg & 0xFFFF; + u16 schq, parent; + + if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) + return false; + + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + /* Check if this schq belongs to this PF/VF or not */ + if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) + return false; + + parent = (regval >> 16) & 0x1FF; + /* Validate MDQ's TL4 parent */ + if (regbase == NIX_AF_MDQX_PARENT(0) && + !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) + return false; + + /* Validate TL4's TL3 parent */ + if (regbase == NIX_AF_TL4X_PARENT(0) && + !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) + return false; + + /* Validate TL3's TL2 parent */ + if (regbase == NIX_AF_TL3X_PARENT(0) && + !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) + return false; + + /* Validate TL2's TL1 parent */ + if (regbase == NIX_AF_TL2X_PARENT(0) && + !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) + return false; + + return true; +} + +int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, + struct nix_txschq_config *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; 
+ u16 pcifunc = req->hdr.pcifunc; + u64 reg, regval, schq_regbase; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + int blkaddr, idx, err; + int nixlf; + + if (req->lvl >= NIX_TXSCH_LVL_CNT || + req->num_regs > MAX_REGS_PER_MBOX_MSG) + return NIX_AF_INVAL_TXSCHQ_CFG; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + txsch = &nix_hw->txsch[req->lvl]; + for (idx = 0; idx < req->num_regs; idx++) { + reg = req->reg[idx]; + regval = req->regval[idx]; + schq_regbase = reg & 0xFFFF; + + if (!is_txschq_config_valid(rvu, pcifunc, blkaddr, + txsch->lvl, reg, regval)) + return NIX_AF_INVAL_TXSCHQ_CFG; + + /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ + if (schq_regbase == NIX_AF_SMQX_CFG(0)) { + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], + pcifunc, 0); + regval &= ~(0x7FULL << 24); + regval |= ((u64)nixlf << 24); + } + + rvu_write64(rvu, blkaddr, reg, regval); + + /* Check for SMQ flush, if so, poll for its completion */ + if (schq_regbase == NIX_AF_SMQX_CFG(0) && + (regval & BIT_ULL(49))) { + err = rvu_poll_reg(rvu, blkaddr, + reg, BIT_ULL(49), true); + if (err) + return NIX_AF_SMQ_FLUSH_FAILED; + } + } + return 0; +} + +static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, + struct nix_vtag_config *req) +{ + u64 regval = 0; + +#define NIX_VTAGTYPE_MAX 0x8ull +#define NIX_VTAGSIZE_MASK 0x7ull +#define NIX_VTAGSTRIP_CAP_MASK 0x30ull + + if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX || + req->vtag_size > VTAGSIZE_T8) + return -EINVAL; + + regval = rvu_read64(rvu, blkaddr, + NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type)); + + if (req->rx.strip_vtag && req->rx.capture_vtag) + regval |= BIT_ULL(4) | BIT_ULL(5); + else if (req->rx.strip_vtag) + regval |= BIT_ULL(4); + else + regval &= ~(BIT_ULL(4) | BIT_ULL(5)); + + regval &= ~NIX_VTAGSIZE_MASK; + regval |= req->vtag_size & NIX_VTAGSIZE_MASK; + + rvu_write64(rvu, blkaddr, + NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); + return 0; +} + +int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu, + struct nix_vtag_config *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, nixlf, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (req->cfg_type) { + err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); + if (err) + return NIX_AF_ERR_PARAM; + } else { + /* TODO: handle tx vtag configuration */ + return 0; + } + + return 0; +} + +static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, + u16 pcifunc, int next, bool eol) +{ + struct nix_aq_enq_req aq_req; + int err; + + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = NIX_AQ_CTYPE_MCE; + aq_req.op = op; + aq_req.qidx = mce; + + /* Forward bcast pkts to RQ0, RSS not needed */ + aq_req.mce.op = 0; + aq_req.mce.index = 0; + aq_req.mce.eol = eol; + aq_req.mce.pf_func = pcifunc; + aq_req.mce.next = next; + + /* All fields valid */ + *(u64 *)(&aq_req.mce_mask) = ~0ULL; + + err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); + if (err) { + dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", + rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + return err; + } + return 0; +} + 
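For readers unfamiliar with the MCE layout: nix_setup_mce() above writes one replication entry per PF/VF through the NIX admin queue, and nix_update_mce_list() below re-walks the software list so the hardware chain's next pointers and EOL flag stay consistent. The following standalone sketch is not part of this patch; the struct and helper names (mce_entry, build_chain) are hypothetical stand-ins for nix_rx_mce_s and the AQ write, shown only to illustrate the next/EOL linking these helpers maintain.

/*
 * Standalone illustration (not part of the patch): how a broadcast
 * replication chain like the one nix_setup_mce() programs is linked.
 * Each entry forwards the packet to RQ0 of one PF_FUNC and names the
 * next MCE index; EOL is set only on the last entry of the chain.
 */
#include <stdbool.h>
#include <stdio.h>

struct mce_entry {              /* simplified stand-in for nix_rx_mce_s */
	unsigned short pf_func; /* destination PF/VF */
	unsigned short next;    /* index of the next entry in the chain */
	bool eol;               /* true on the last entry only */
};

static void build_chain(struct mce_entry *tbl, int base,
			const unsigned short *pf_funcs, int count)
{
	for (int i = 0; i < count; i++) {
		tbl[base + i].pf_func = pf_funcs[i];
		tbl[base + i].next = (i == count - 1) ? 0 : base + i + 1;
		tbl[base + i].eol = (i == count - 1);
	}
}

int main(void)
{
	struct mce_entry tbl[16] = { 0 };
	/* hypothetical PF_FUNCs: one PF followed by two of its VFs */
	unsigned short pf_funcs[] = { 0x0400, 0x0401, 0x0402 };

	/* start at MCE index 4, as if allocated by nix_alloc_mce_list() */
	build_chain(tbl, 4, pf_funcs, 3);

	for (int i = 4; i < 7; i++)
		printf("mce[%d]: pf_func=0x%04x next=%u eol=%d\n",
		       i, tbl[i].pf_func, tbl[i].next, tbl[i].eol);
	return 0;
}

Adding or removing a PF/VF changes which entry is last, which is why the update path below rewrites every entry's next index and recomputes EOL rather than patching a single slot.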
+static int nix_update_mce_list(struct nix_mce_list *mce_list, + u16 pcifunc, int idx, bool add) +{ + struct mce *mce, *tail = NULL; + bool delete = false; + + /* Scan through the current list */ + hlist_for_each_entry(mce, &mce_list->head, node) { + /* If already exists, then delete */ + if (mce->pcifunc == pcifunc && !add) { + delete = true; + break; + } + tail = mce; + } + + if (delete) { + hlist_del(&mce->node); + kfree(mce); + mce_list->count--; + return 0; + } + + if (!add) + return 0; + + /* Add a new one to the list, at the tail */ + mce = kzalloc(sizeof(*mce), GFP_KERNEL); + if (!mce) + return -ENOMEM; + mce->idx = idx; + mce->pcifunc = pcifunc; + if (!tail) + hlist_add_head(&mce->node, &mce_list->head); + else + hlist_add_behind(&mce->node, &tail->node); + mce_list->count++; + return 0; +} + +static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) +{ + int err = 0, idx, next_idx, count; + struct nix_mce_list *mce_list; + struct mce *mce, *next_mce; + struct nix_mcast *mcast; + struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return 0; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return 0; + + mcast = &nix_hw->mcast; + + /* Get this PF/VF func's MCE index */ + pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); + + mce_list = &pfvf->bcast_mce_list; + if (idx > (pfvf->bcast_mce_idx + mce_list->max)) { + dev_err(rvu->dev, + "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", + __func__, idx, mce_list->max, + pcifunc >> RVU_PFVF_PF_SHIFT); + return -EINVAL; + } + + spin_lock(&mcast->mce_lock); + + err = nix_update_mce_list(mce_list, pcifunc, idx, add); + if (err) + goto end; + + /* Disable MCAM entry in NPC */ + + if (!mce_list->count) + goto end; + count = mce_list->count; + + /* Dump the updated list to HW */ + hlist_for_each_entry(mce, &mce_list->head, node) { + next_idx = 0; + count--; + if (count) { + next_mce = hlist_entry(mce->node.next, + struct mce, node); + next_idx = next_mce->idx; + } + /* EOL should be set in last MCE */ + err = nix_setup_mce(rvu, mce->idx, + NIX_AQ_INSTOP_WRITE, mce->pcifunc, + next_idx, count ? false : true); + if (err) + goto end; + } + +end: + spin_unlock(&mcast->mce_lock); + return err; +} + +static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) +{ + struct nix_mcast *mcast = &nix_hw->mcast; + int err, pf, numvfs, idx; + struct rvu_pfvf *pfvf; + u16 pcifunc; + u64 cfg; + + /* Skip PF0 (i.e AF) */ + for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + /* If PF is not enabled, nothing to do */ + if (!((cfg >> 20) & 0x01)) + continue; + /* Get numVFs attached to this PF */ + numvfs = (cfg >> 12) & 0xFF; + + pfvf = &rvu->pf[pf]; + /* Save the start MCE */ + pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); + + nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); + + for (idx = 0; idx < (numvfs + 1); idx++) { + /* idx-0 is for PF, followed by VFs */ + pcifunc = (pf << RVU_PFVF_PF_SHIFT); + pcifunc |= idx; + /* Add dummy entries now, so that we don't have to check + * for whether AQ_OP should be INIT/WRITE later on. + * Will be updated when a NIXLF is attached/detached to + * these PF/VFs. 
+ */ + err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx, + NIX_AQ_INSTOP_INIT, + pcifunc, 0, true); + if (err) + return err; + } + } + return 0; +} + +static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) +{ + struct nix_mcast *mcast = &nix_hw->mcast; + struct rvu_hwinfo *hw = rvu->hw; + int err, size; + + size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; + size = (1ULL << size); + + /* Alloc memory for multicast/mirror replication entries */ + err = qmem_alloc(rvu->dev, &mcast->mce_ctx, + (256UL << MC_TBL_SIZE), size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, + (u64)mcast->mce_ctx->iova); + + /* Set max list length equal to max no of VFs per PF + PF itself */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, + BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); + + /* Alloc memory for multicast replication buffers */ + size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; + err = qmem_alloc(rvu->dev, &mcast->mcast_buf, + (8UL << MC_BUF_CNT), size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, + (u64)mcast->mcast_buf->iova); + + /* Alloc pkind for NIX internal RX multicast/mirror replay */ + mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); + + rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, + BIT_ULL(63) | (mcast->replay_pkind << 24) | + BIT_ULL(20) | MC_BUF_CNT); + + spin_lock_init(&mcast->mce_lock); + + return nix_setup_bcast_tables(rvu, nix_hw); +} + +static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) +{ + struct nix_txsch *txsch; + u64 cfg, reg; + int err, lvl; + + /* Get scheduler queue count of each type and alloc + * bitmap for each for alloc/free/attach operations. + */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + txsch->lvl = lvl; + switch (lvl) { + case NIX_TXSCH_LVL_SMQ: + reg = NIX_AF_MDQ_CONST; + break; + case NIX_TXSCH_LVL_TL4: + reg = NIX_AF_TL4_CONST; + break; + case NIX_TXSCH_LVL_TL3: + reg = NIX_AF_TL3_CONST; + break; + case NIX_TXSCH_LVL_TL2: + reg = NIX_AF_TL2_CONST; + break; + case NIX_TXSCH_LVL_TL1: + reg = NIX_AF_TL1_CONST; + break; + } + cfg = rvu_read64(rvu, blkaddr, reg); + txsch->schq.max = cfg & 0xFFFF; + err = rvu_alloc_bitmap(&txsch->schq); + if (err) + return err; + + /* Allocate memory for scheduler queues to + * PF/VF pcifunc mapping info. 
+ */ + txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, + sizeof(u16), GFP_KERNEL); + if (!txsch->pfvf_map) + return -ENOMEM; + } + return 0; +} + +int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int i, nixlf, blkaddr; + u64 stats; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + /* Get stats count supported by HW */ + stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + + /* Reset tx stats */ + for (i = 0; i < ((stats >> 24) & 0xFF); i++) + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); + + /* Reset rx stats */ + for (i = 0; i < ((stats >> 32) & 0xFF); i++) + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); + + return 0; +} + +/* Returns the ALG index to be set into NPC_RX_ACTION */ +static int get_flowkey_alg_idx(u32 flow_cfg) +{ + u32 ip_cfg; + + flow_cfg &= ~FLOW_KEY_TYPE_PORT; + ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6; + if (flow_cfg == ip_cfg) + return FLOW_KEY_ALG_IP; + else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP)) + return FLOW_KEY_ALG_TCP; + else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP)) + return FLOW_KEY_ALG_UDP; + else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP)) + return FLOW_KEY_ALG_SCTP; + else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP)) + return FLOW_KEY_ALG_TCP_UDP; + else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP)) + return FLOW_KEY_ALG_TCP_SCTP; + else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP)) + return FLOW_KEY_ALG_UDP_SCTP; + else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | + FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP)) + return FLOW_KEY_ALG_TCP_UDP_SCTP; + + return FLOW_KEY_ALG_PORT; +} + +int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu, + struct nix_rss_flowkey_cfg *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int alg_idx, nixlf, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + alg_idx = get_flowkey_alg_idx(req->flowkey_cfg); + + rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, + alg_idx, req->mcam_index); + return 0; +} + +static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) +{ + struct nix_rx_flowkey_alg *field = NULL; + int idx, key_type; + + if (!alg) + return; + + /* FIELD0: IPv4 + * FIELD1: IPv6 + * FIELD2: TCP/UDP/SCTP/ALL + * FIELD3: Unused + * FIELD4: Unused + * + * Each of the 32 possible flow key algorithm definitions should + * fall into above incremental config (except ALG0). Otherwise a + * single NPC MCAM entry is not sufficient for supporting RSS. + * + * If a different definition or combination needed then NPC MCAM + * has to be programmed to filter such pkts and it's action should + * point to this definition to calculate flowtag or hash. 
+ */ + for (idx = 0; idx < 32; idx++) { + key_type = flow_cfg & BIT_ULL(idx); + if (!key_type) + continue; + switch (key_type) { + case FLOW_KEY_TYPE_PORT: + field = &alg[0]; + field->sel_chan = true; + /* This should be set to 1, when SEL_CHAN is set */ + field->bytesm1 = 1; + break; + case FLOW_KEY_TYPE_IPV4: + field = &alg[0]; + field->lid = NPC_LID_LC; + field->ltype_match = NPC_LT_LC_IP; + field->hdr_offset = 12; /* SIP offset */ + field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ + field->ltype_mask = 0xF; /* Match only IPv4 */ + break; + case FLOW_KEY_TYPE_IPV6: + field = &alg[1]; + field->lid = NPC_LID_LC; + field->ltype_match = NPC_LT_LC_IP6; + field->hdr_offset = 8; /* SIP offset */ + field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ + field->ltype_mask = 0xF; /* Match only IPv6 */ + break; + case FLOW_KEY_TYPE_TCP: + case FLOW_KEY_TYPE_UDP: + case FLOW_KEY_TYPE_SCTP: + field = &alg[2]; + field->lid = NPC_LID_LD; + field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ + if (key_type == FLOW_KEY_TYPE_TCP) + field->ltype_match |= NPC_LT_LD_TCP; + else if (key_type == FLOW_KEY_TYPE_UDP) + field->ltype_match |= NPC_LT_LD_UDP; + else if (key_type == FLOW_KEY_TYPE_SCTP) + field->ltype_match |= NPC_LT_LD_SCTP; + field->key_offset = 32; /* After IPv4/v6 SIP, DIP */ + field->ltype_mask = ~field->ltype_match; + break; + } + if (field) + field->ena = 1; + field = NULL; + } +} + +static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) +{ +#define FIELDS_PER_ALG 5 + u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG]; + u32 flowkey_cfg, minkey_cfg; + int alg, fid; + + memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG); + + /* Only incoming channel number */ + flowkey_cfg = FLOW_KEY_TYPE_PORT; + set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg); + + /* For a incoming pkt if none of the fields match then flowkey + * will be zero, hence tag generated will also be zero. + * RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will + * be used to queue the packet. 
+ */ + + /* IPv4/IPv6 SIP/DIPs */ + flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6; + set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg); + + /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ + minkey_cfg = flowkey_cfg; + flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP; + set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg); + + /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ + flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP; + set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg); + + /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ + flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP; + set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg); + + /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ + flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP; + set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg); + + /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ + flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP; + set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg); + + /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ + flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP; + set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg); + + /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ + flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | + FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP; + set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP], + flowkey_cfg); + + for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) { + for (fid = 0; fid < FIELDS_PER_ALG; fid++) + rvu_write64(rvu, blkaddr, + NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), + field[alg][fid]); + } +} + +int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu, + struct nix_set_mac_addr *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + int blkaddr, nixlf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + ether_addr_copy(pfvf->mac_addr, req->mac_addr); + + rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, req->mac_addr); + return 0; +} + +int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req, + struct msg_rsp *rsp) +{ + bool allmulti = false, disable_promisc = false; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + int blkaddr, nixlf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (req->mode & NIX_RX_MODE_PROMISC) + allmulti = false; + else if (req->mode & NIX_RX_MODE_ALLMULTI) + allmulti = true; + else + disable_promisc = true; + + if (disable_promisc) + rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); + else + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, allmulti); + return 0; +} + +static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) +{ + int idx, err; + u64 status; + + /* Start X2P bus calibration */ + rvu_write64(rvu, blkaddr, NIX_AF_CFG, + rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); + /* Wait for calibration to complete */ + err = 
rvu_poll_reg(rvu, blkaddr, + NIX_AF_STATUS, BIT_ULL(10), false); + if (err) { + dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); + return err; + } + + status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); + /* Check if CGX devices are ready */ + for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) { + if (status & (BIT_ULL(16 + idx))) + continue; + dev_err(rvu->dev, + "CGX%d didn't respond to NIX X2P calibration\n", idx); + err = -EBUSY; + } + + /* Check if LBK is ready */ + if (!(status & BIT_ULL(19))) { + dev_err(rvu->dev, + "LBK didn't respond to NIX X2P calibration\n"); + err = -EBUSY; + } + + /* Clear 'calibrate_x2p' bit */ + rvu_write64(rvu, blkaddr, NIX_AF_CFG, + rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); + if (err || (status & 0x3FFULL)) + dev_err(rvu->dev, + "NIX X2P calibration failed, status 0x%llx\n", status); + if (err) + return err; + return 0; +} + +static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) +{ + u64 cfg; + int err; + + /* Set admin queue endianness */ + cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); +#ifdef __BIG_ENDIAN + cfg |= BIT_ULL(1); + rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); +#else + cfg &= ~BIT_ULL(1); + rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); +#endif + + /* Do not bypass NDC cache */ + cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); + cfg &= ~0x3FFEULL; + rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); + + /* Result structure can be followed by RQ/SQ/CQ context at + * RES + 128bytes and a write mask at RES + 256 bytes, depending on + * operation type. Alloc sufficient result memory for all operations. + */ + err = rvu_aq_alloc(rvu, &block->aq, + Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), + ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); + if (err) + return err; + + rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); + rvu_write64(rvu, block->addr, + NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); + return 0; +} + +int rvu_nix_init(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr, err; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return 0; + block = &hw->block[blkaddr]; + + /* Calibrate X2P bus to check if CGX/LBK links are fine */ + err = nix_calibrate_x2p(rvu, blkaddr); + if (err) + return err; + + /* Set num of links of each type */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); + hw->cgx = (cfg >> 12) & 0xF; + hw->lmac_per_cgx = (cfg >> 8) & 0xF; + hw->cgx_links = hw->cgx * hw->lmac_per_cgx; + hw->lbk_links = 1; + hw->sdp_links = 1; + + /* Initialize admin queue */ + err = nix_aq_init(rvu, block); + if (err) + return err; + + /* Restore CINT timer delay to HW reset values */ + rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); + + /* Configure segmentation offload formats */ + nix_setup_lso(rvu, blkaddr); + + if (blkaddr == BLKADDR_NIX0) { + hw->nix0 = devm_kzalloc(rvu->dev, + sizeof(struct nix_hw), GFP_KERNEL); + if (!hw->nix0) + return -ENOMEM; + + err = nix_setup_txschq(rvu, hw->nix0, blkaddr); + if (err) + return err; + + err = nix_setup_mcast(rvu, hw->nix0, blkaddr); + if (err) + return err; + + /* Config Outer L2, IP, TCP and UDP's NPC layer info. + * This helps HW protocol checker to identify headers + * and validate length and checksums. 
+ */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, + (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, + (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, + (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, + (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F); + + nix_rx_flowkey_alg_cfg(rvu, blkaddr); + } + return 0; +} + +void rvu_nix_freemem(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + struct nix_txsch *txsch; + struct nix_mcast *mcast; + struct nix_hw *nix_hw; + int blkaddr, lvl; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + rvu_aq_free(rvu, block->aq); + + if (blkaddr == BLKADDR_NIX0) { + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return; + + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + kfree(txsch->schq.bmap); + } + + mcast = &nix_hw->mcast; + qmem_free(rvu->dev, mcast->mce_ctx); + qmem_free(rvu->dev, mcast->mcast_buf); + } +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c new file mode 100644 index 000000000000..7531fdc54fa1 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "rvu.h" + +static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, + struct npa_aq_inst_s *inst) +{ + struct admin_queue *aq = block->aq; + struct npa_aq_res_s *result; + int timeout = 1000; + u64 reg, head; + + result = (struct npa_aq_res_s *)aq->res->base; + + /* Get current head pointer where to append this instruction */ + reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS); + head = (reg >> 4) & AQ_PTR_MASK; + + memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), + (void *)inst, aq->inst->entry_sz); + memset(result, 0, sizeof(*result)); + /* sync into memory */ + wmb(); + + /* Ring the doorbell and wait for result */ + rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1); + while (result->compcode == NPA_AQ_COMP_NOTDONE) { + cpu_relax(); + udelay(1); + timeout--; + if (!timeout) + return -EBUSY; + } + + if (result->compcode != NPA_AQ_COMP_GOOD) + /* TODO: Replace this with some error code */ + return -EBUSY; + + return 0; +} + +static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, npalf, rc = 0; + struct npa_aq_inst_s inst; + struct rvu_block *block; + struct admin_queue *aq; + struct rvu_pfvf *pfvf; + void *ctx, *mask; + bool ena; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize) + return NPA_AF_ERR_AQ_ENQUEUE; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); + if (!pfvf->npalf || blkaddr < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + aq = block->aq; + if (!aq) { + dev_warn(rvu->dev, "%s: NPA AQ not 
initialized\n", __func__); + return NPA_AF_ERR_AQ_ENQUEUE; + } + + npalf = rvu_get_lf(rvu, block, pcifunc, 0); + if (npalf < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + memset(&inst, 0, sizeof(struct npa_aq_inst_s)); + inst.cindex = req->aura_id; + inst.lf = npalf; + inst.ctype = req->ctype; + inst.op = req->op; + /* Currently we are not supporting enqueuing multiple instructions, + * so always choose first entry in result memory. + */ + inst.res_addr = (u64)aq->res->iova; + + /* Clean result + context memory */ + memset(aq->res->base, 0, aq->res->entry_sz); + /* Context needs to be written at RES_ADDR + 128 */ + ctx = aq->res->base + 128; + /* Mask needs to be written at RES_ADDR + 256 */ + mask = aq->res->base + 256; + + switch (req->op) { + case NPA_AQ_INSTOP_WRITE: + /* Copy context and write mask */ + if (req->ctype == NPA_AQ_CTYPE_AURA) { + memcpy(mask, &req->aura_mask, + sizeof(struct npa_aura_s)); + memcpy(ctx, &req->aura, sizeof(struct npa_aura_s)); + } else { + memcpy(mask, &req->pool_mask, + sizeof(struct npa_pool_s)); + memcpy(ctx, &req->pool, sizeof(struct npa_pool_s)); + } + break; + case NPA_AQ_INSTOP_INIT: + if (req->ctype == NPA_AQ_CTYPE_AURA) { + if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) { + rc = NPA_AF_ERR_AQ_FULL; + break; + } + /* Set pool's context address */ + req->aura.pool_addr = pfvf->pool_ctx->iova + + (req->aura.pool_addr * pfvf->pool_ctx->entry_sz); + memcpy(ctx, &req->aura, sizeof(struct npa_aura_s)); + } else { /* POOL's context */ + memcpy(ctx, &req->pool, sizeof(struct npa_pool_s)); + } + break; + case NPA_AQ_INSTOP_NOP: + case NPA_AQ_INSTOP_READ: + case NPA_AQ_INSTOP_LOCK: + case NPA_AQ_INSTOP_UNLOCK: + break; + default: + rc = NPA_AF_ERR_AQ_FULL; + break; + } + + if (rc) + return rc; + + spin_lock(&aq->lock); + + /* Submit the instruction to AQ */ + rc = npa_aq_enqueue_wait(rvu, block, &inst); + if (rc) { + spin_unlock(&aq->lock); + return rc; + } + + /* Set aura bitmap if aura hw context is enabled */ + if (req->ctype == NPA_AQ_CTYPE_AURA) { + if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena) + __set_bit(req->aura_id, pfvf->aura_bmap); + if (req->op == NPA_AQ_INSTOP_WRITE) { + ena = (req->aura.ena & req->aura_mask.ena) | + (test_bit(req->aura_id, pfvf->aura_bmap) & + ~req->aura_mask.ena); + if (ena) + __set_bit(req->aura_id, pfvf->aura_bmap); + else + __clear_bit(req->aura_id, pfvf->aura_bmap); + } + } + + /* Set pool bitmap if pool hw context is enabled */ + if (req->ctype == NPA_AQ_CTYPE_POOL) { + if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena) + __set_bit(req->aura_id, pfvf->pool_bmap); + if (req->op == NPA_AQ_INSTOP_WRITE) { + ena = (req->pool.ena & req->pool_mask.ena) | + (test_bit(req->aura_id, pfvf->pool_bmap) & + ~req->pool_mask.ena); + if (ena) + __set_bit(req->aura_id, pfvf->pool_bmap); + else + __clear_bit(req->aura_id, pfvf->pool_bmap); + } + } + spin_unlock(&aq->lock); + + if (rsp) { + /* Copy read context into mailbox */ + if (req->op == NPA_AQ_INSTOP_READ) { + if (req->ctype == NPA_AQ_CTYPE_AURA) + memcpy(&rsp->aura, ctx, + sizeof(struct npa_aura_s)); + else + memcpy(&rsp->pool, ctx, + sizeof(struct npa_pool_s)); + } + } + + return 0; +} + +static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + struct npa_aq_enq_req aq_req; + unsigned long *bmap; + int id, cnt = 0; + int err = 0, rc; + + if (!pfvf->pool_ctx || !pfvf->aura_ctx) + return NPA_AF_ERR_AQ_ENQUEUE; + + memset(&aq_req, 0, sizeof(struct npa_aq_enq_req)); + aq_req.hdr.pcifunc = 
req->hdr.pcifunc; + + if (req->ctype == NPA_AQ_CTYPE_POOL) { + aq_req.pool.ena = 0; + aq_req.pool_mask.ena = 1; + cnt = pfvf->pool_ctx->qsize; + bmap = pfvf->pool_bmap; + } else if (req->ctype == NPA_AQ_CTYPE_AURA) { + aq_req.aura.ena = 0; + aq_req.aura_mask.ena = 1; + cnt = pfvf->aura_ctx->qsize; + bmap = pfvf->aura_bmap; + } + + aq_req.ctype = req->ctype; + aq_req.op = NPA_AQ_INSTOP_WRITE; + + for (id = 0; id < cnt; id++) { + if (!test_bit(id, bmap)) + continue; + aq_req.aura_id = id; + rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL); + if (rc) { + err = rc; + dev_err(rvu->dev, "Failed to disable %s:%d context\n", + (req->ctype == NPA_AQ_CTYPE_AURA) ? + "Aura" : "Pool", id); + } + } + + return err; +} + +int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu, + struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp) +{ + return rvu_npa_aq_enq_inst(rvu, req, rsp); +} + +int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu, + struct hwctx_disable_req *req, + struct msg_rsp *rsp) +{ + return npa_lf_hwctx_disable(rvu, req); +} + +static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) +{ + kfree(pfvf->aura_bmap); + pfvf->aura_bmap = NULL; + + qmem_free(rvu->dev, pfvf->aura_ctx); + pfvf->aura_ctx = NULL; + + kfree(pfvf->pool_bmap); + pfvf->pool_bmap = NULL; + + qmem_free(rvu->dev, pfvf->pool_ctx); + pfvf->pool_ctx = NULL; + + qmem_free(rvu->dev, pfvf->npa_qints_ctx); + pfvf->npa_qints_ctx = NULL; +} + +int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu, + struct npa_lf_alloc_req *req, + struct npa_lf_alloc_rsp *rsp) +{ + int npalf, qints, hwctx_size, err, rc = 0; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + u64 cfg, ctx_cfg; + int blkaddr; + + if (req->aura_sz > NPA_AURA_SZ_MAX || + req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools) + return NPA_AF_ERR_PARAM; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); + if (!pfvf->npalf || blkaddr < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + npalf = rvu_get_lf(rvu, block, pcifunc, 0); + if (npalf < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + /* Reset this NPA LF */ + err = rvu_lf_reset(rvu, block, npalf); + if (err) { + dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf); + return NPA_AF_ERR_LF_RESET; + } + + ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1); + + /* Alloc memory for aura HW contexts */ + hwctx_size = 1UL << (ctx_cfg & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->aura_ctx, + NPA_AURA_COUNT(req->aura_sz), hwctx_size); + if (err) + goto free_mem; + + pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long), + GFP_KERNEL); + if (!pfvf->aura_bmap) + goto free_mem; + + /* Alloc memory for pool HW contexts */ + hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size); + if (err) + goto free_mem; + + pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long), + GFP_KERNEL); + if (!pfvf->pool_bmap) + goto free_mem; + + /* Get no of queue interrupts supported */ + cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST); + qints = (cfg >> 28) & 0xFFF; + + /* Alloc memory for Qints HW contexts */ + hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size); + if (err) + goto free_mem; + + cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf)); + /* Clear way partition mask and set aura offset to '0' */ + cfg &= ~(BIT_ULL(34) - 1); + /* Set aura size & enable caching of 
contexts */ + cfg |= (req->aura_sz << 16) | BIT_ULL(34); + rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg); + + /* Configure aura HW context's base */ + rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf), + (u64)pfvf->aura_ctx->iova); + + /* Enable caching of qints hw context */ + rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36)); + rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf), + (u64)pfvf->npa_qints_ctx->iova); + + goto exit; + +free_mem: + npa_ctx_free(rvu, pfvf); + rc = -ENOMEM; + +exit: + /* set stack page info */ + cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST); + rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF; + rsp->stack_pg_bytes = cfg & 0xFF; + rsp->qints = (cfg >> 28) & 0xFFF; + return rc; +} + +int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + int npalf, err; + int blkaddr; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); + if (!pfvf->npalf || blkaddr < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + npalf = rvu_get_lf(rvu, block, pcifunc, 0); + if (npalf < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + /* Reset this NPA LF */ + err = rvu_lf_reset(rvu, block, npalf); + if (err) { + dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf); + return NPA_AF_ERR_LF_RESET; + } + + npa_ctx_free(rvu, pfvf); + + return 0; +} + +static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) +{ + u64 cfg; + int err; + + /* Set admin queue endianness */ + cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG); +#ifdef __BIG_ENDIAN + cfg |= BIT_ULL(1); + rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); +#else + cfg &= ~BIT_ULL(1); + rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); +#endif + + /* Do not bypass NDC cache */ + cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); + cfg &= ~0x03DULL; + rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); + + /* Result structure can be followed by Aura/Pool context at + * RES + 128bytes and a write mask at RES + 256 bytes, depending on + * operation type. Alloc sufficient result memory for all operations. + */ + err = rvu_aq_alloc(rvu, &block->aq, + Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s), + ALIGN(sizeof(struct npa_aq_res_s), 128) + 256); + if (err) + return err; + + rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE); + rvu_write64(rvu, block->addr, + NPA_AF_AQ_BASE, (u64)block->aq->inst->iova); + return 0; +} + +int rvu_npa_init(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return 0; + + /* Initialize admin queue */ + err = npa_aq_init(rvu, &hw->block[blkaddr]); + if (err) + return err; + + return 0; +} + +void rvu_npa_freemem(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + rvu_aq_free(rvu, block->aq); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c new file mode 100644 index 000000000000..23ff47f7efc5 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -0,0 +1,816 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "rvu.h" +#include "npc.h" +#include "npc_profile.h" + +#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */ +#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ + +#define NIXLF_UCAST_ENTRY 0 +#define NIXLF_BCAST_ENTRY 1 +#define NIXLF_PROMISC_ENTRY 2 + +#define NPC_PARSE_RESULT_DMAC_OFFSET 8 + +struct mcam_entry { +#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */ + u64 kw[NPC_MAX_KWS_IN_KEY]; + u64 kw_mask[NPC_MAX_KWS_IN_KEY]; + u64 action; + u64 vtag_action; +}; + +void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) +{ + int blkaddr; + u64 val = 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Config CPI base for the PKIND */ + val = pkind | 1ULL << 62; + rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val); +} + +int rvu_npc_get_pkind(struct rvu *rvu, u16 pf) +{ + struct npc_pkind *pkind = &rvu->hw->pkind; + u32 map; + int i; + + for (i = 0; i < pkind->rsrc.max; i++) { + map = pkind->pfchan_map[i]; + if (((map >> 16) & 0x3F) == pf) + return i; + } + return -1; +} + +static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, + u16 pcifunc, int nixlf, int type) +{ + int pf = rvu_get_pf(pcifunc); + int index; + + /* Check if this is for a PF */ + if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) { + /* Reserved entries exclude PF0 */ + pf--; + index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF); + /* Broadcast address matching entry should be first so + * that the packet can be replicated to all VFs. + */ + if (type == NIXLF_BCAST_ENTRY) + return index; + else if (type == NIXLF_PROMISC_ENTRY) + return index + 1; + } + + return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF)); +} + +static int npc_get_bank(struct npc_mcam *mcam, int index) +{ + int bank = index / mcam->banksize; + + /* 0,1 & 2,3 banks are combined for this keysize */ + if (mcam->keysize == NPC_MCAM_KEY_X2) + return bank ? 2 : 0; + + return bank; +} + +static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index) +{ + int bank = npc_get_bank(mcam, index); + u64 cfg; + + index &= (mcam->banksize - 1); + cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank)); + return (cfg & 1); +} + +static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, bool enable) +{ + int bank = npc_get_bank(mcam, index); + int actbank = bank; + + index &= (mcam->banksize - 1); + for (; bank < (actbank + mcam->banks_per_entry); bank++) { + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(index, bank), + enable ? 1 : 0); + } +} + +static void npc_get_keyword(struct mcam_entry *entry, int idx, + u64 *cam0, u64 *cam1) +{ + u64 kw_mask = 0x00; + +#define CAM_MASK(n) (BIT_ULL(n) - 1) + + /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and + * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1. + * + * Also, only 48 bits of BANKX_CAMX_W1 are valid. 
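 *
 * Each bank thus holds 64 + 48 = 112 key bits, so the 64-bit kw[]
 * words straddle register boundaries; the shifts and masks in the
 * switch below stitch the right slices of kw[]/kw_mask[] into each
 * CAM word.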
+ */ + switch (idx) { + case 0: + /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */ + *cam1 = entry->kw[0]; + kw_mask = entry->kw_mask[0]; + break; + case 1: + /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */ + *cam1 = entry->kw[1] & CAM_MASK(48); + kw_mask = entry->kw_mask[1] & CAM_MASK(48); + break; + case 2: + /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48> + * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0> + */ + *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16); + *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16); + kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16); + kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16); + break; + case 3: + /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48> + * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0> + */ + *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16); + *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16); + kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16); + kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16); + break; + case 4: + /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32> + * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0> + */ + *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32); + *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32); + kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32); + kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32); + break; + case 5: + /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32> + * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0> + */ + *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32); + *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32); + kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32); + kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32); + break; + case 6: + /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16> + * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0> + */ + *cam1 = (entry->kw[5] >> 16) & CAM_MASK(48); + *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48); + kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48); + kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48); + break; + case 7: + /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */ + *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48); + kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48); + break; + } + + *cam1 &= kw_mask; + *cam0 = ~*cam1 & kw_mask; +} + +static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, u8 intf, + struct mcam_entry *entry, bool enable) +{ + int bank = npc_get_bank(mcam, index); + int kw = 0, actbank, actindex; + u64 cam0, cam1; + + actbank = bank; /* Save bank id, to set action later on */ + actindex = index; + index &= (mcam->banksize - 1); + + /* CAM1 takes the comparison value and + * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'. + * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0 + * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1 + * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare. 
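 *
 * npc_get_keyword() above emits exactly this encoding: for example,
 * with kw = 0b1010 and kw_mask = 0b1110 it returns cam1 = kw & mask
 * = 0b1010 and cam0 = ~cam1 & mask = 0b0100, so the unmasked bit 0
 * has CAM1 = CAM0 = 0 and is a dontcare.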
+ */ + for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { + /* Interface should be set in all banks */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), + intf); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), + ~intf & 0x3); + + /* Set the match key */ + npc_get_keyword(entry, kw, &cam0, &cam1); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0); + + npc_get_keyword(entry, kw + 1, &cam0, &cam1); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0); + } + + /* Set 'action' */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action); + + /* Set TAG 'action' */ + rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank), + entry->vtag_action); + + /* Enable the entry */ + if (enable) + npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true); + else + npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); +} + +static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index) +{ + int bank = npc_get_bank(mcam, index); + + index &= (mcam->banksize - 1); + return rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); +} + +void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan, u8 *mac_addr) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct mcam_entry entry = { {0} }; + struct nix_rx_action action; + int blkaddr, index, kwi; + u64 mac = 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + for (index = ETH_ALEN - 1; index >= 0; index--) + mac |= ((u64)*mac_addr++) << (8 * index); + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + + /* Match ingress channel and DMAC */ + entry.kw[0] = chan; + entry.kw_mask[0] = 0xFFFULL; + + kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64); + entry.kw[kwi] = mac; + entry.kw_mask[kwi] = BIT_ULL(48) - 1; + + /* Don't change the action if entry is already enabled + * Otherwise RSS action may get overwritten. 
+ */ + if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { + *(u64 *)&action = npc_get_mcam_action(rvu, mcam, + blkaddr, index); + } else { + *(u64 *)&action = 0x00; + action.op = NIX_RX_ACTIONOP_UCAST; + action.pf_func = pcifunc; + } + + entry.action = *(u64 *)&action; + npc_config_mcam_entry(rvu, mcam, blkaddr, index, + NIX_INTF_RX, &entry, true); +} + +void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan, bool allmulti) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct mcam_entry entry = { {0} }; + struct nix_rx_action action; + int blkaddr, index, kwi; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Only PF or AF VF can add a promiscuous entry */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + return; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_PROMISC_ENTRY); + + entry.kw[0] = chan; + entry.kw_mask[0] = 0xFFFULL; + + if (allmulti) { + kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64); + entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */ + entry.kw_mask[kwi] = BIT_ULL(40); + } + + *(u64 *)&action = 0x00; + action.op = NIX_RX_ACTIONOP_UCAST; + action.pf_func = pcifunc; + + entry.action = *(u64 *)&action; + npc_config_mcam_entry(rvu, mcam, blkaddr, index, + NIX_INTF_RX, &entry, true); +} + +void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, index; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Only PF's have a promiscuous entry */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + return; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_PROMISC_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); +} + +void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, u64 chan) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct mcam_entry entry = { {0} }; + struct nix_rx_action action; +#ifdef MCAST_MCE + struct rvu_pfvf *pfvf; +#endif + int blkaddr, index; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Only PF can add a bcast match entry */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + return; +#ifdef MCAST_MCE + pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); +#endif + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_BCAST_ENTRY); + + /* Check for L2B bit and LMAC channel */ + entry.kw[0] = BIT_ULL(25) | chan; + entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL; + + *(u64 *)&action = 0x00; +#ifdef MCAST_MCE + /* Early silicon doesn't support pkt replication, + * so install entry with UCAST action, so that PF + * receives all broadcast packets. 
+ */ + action.op = NIX_RX_ACTIONOP_MCAST; + action.pf_func = pcifunc; + action.index = pfvf->bcast_mce_idx; +#else + action.op = NIX_RX_ACTIONOP_UCAST; + action.pf_func = pcifunc; +#endif + + entry.action = *(u64 *)&action; + npc_config_mcam_entry(rvu, mcam, blkaddr, index, + NIX_INTF_RX, &entry, true); +} + +void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, + int group, int alg_idx, int mcam_index) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct nix_rx_action action; + int blkaddr, index, bank; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Check if this is for reserved default entry */ + if (mcam_index < 0) { + if (group != DEFAULT_RSS_CONTEXT_GROUP) + return; + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + } else { + /* TODO: validate this mcam index */ + index = mcam_index; + } + + if (index >= mcam->total_entries) + return; + + bank = npc_get_bank(mcam, index); + index &= (mcam->banksize - 1); + + *(u64 *)&action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); + /* Ignore if no action was set earlier */ + if (!*(u64 *)&action) + return; + + action.op = NIX_RX_ACTIONOP_RSS; + action.pf_func = pcifunc; + action.index = group; + action.flow_key_alg = alg_idx; + + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action); +} + +void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct nix_rx_action action; + int blkaddr, index, bank; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Disable ucast MCAM match entry of this PF/VF */ + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); + + /* For PF, disable promisc and bcast MCAM match entries */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_BCAST_ENTRY); + /* For bcast, disable only if it's action is not + * packet replication, incase if action is replication + * then this PF's nixlf is removed from bcast replication + * list. + */ + bank = npc_get_bank(mcam, index); + index &= (mcam->banksize - 1); + *(u64 *)&action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); + if (action.op != NIX_RX_ACTIONOP_MCAST) + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); + + rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); + } +} + +#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \ + rvu_write64(rvu, blkaddr, \ + NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg) + +#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg) \ + rvu_write64(rvu, blkaddr, \ + NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) + +static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int lid, ltype; + int lid_count; + u64 cfg; + + cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); + lid_count = (cfg >> 4) & 0xF; + + /* First clear any existing config i.e + * disable LDATA and FLAGS extraction. 
+ */ + for (lid = 0; lid < lid_count; lid++) { + for (ltype = 0; ltype < 16; ltype++) { + LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL); + LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL); + LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL); + LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL); + + LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL); + LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL); + LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL); + LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL); + } + } + + /* If we plan to extract Outer IPv4 tuple for TCP/UDP pkts + * then 112bit key is not sufficient + */ + if (mcam->keysize != NPC_MCAM_KEY_X2) + return; + + /* Start placing extracted data/flags from 64bit onwards, for now */ + /* Extract DMAC from the packet */ + cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET; + LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg); +} + +static void npc_config_kpuaction(struct rvu *rvu, int blkaddr, + struct npc_kpu_profile_action *kpuaction, + int kpu, int entry, bool pkind) +{ + struct npc_kpu_action0 action0 = {0}; + struct npc_kpu_action1 action1 = {0}; + u64 reg; + + action1.errlev = kpuaction->errlev; + action1.errcode = kpuaction->errcode; + action1.dp0_offset = kpuaction->dp0_offset; + action1.dp1_offset = kpuaction->dp1_offset; + action1.dp2_offset = kpuaction->dp2_offset; + + if (pkind) + reg = NPC_AF_PKINDX_ACTION1(entry); + else + reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry); + + rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1); + + action0.byp_count = kpuaction->bypass_count; + action0.capture_ena = kpuaction->cap_ena; + action0.parse_done = kpuaction->parse_done; + action0.next_state = kpuaction->next_state; + action0.capture_lid = kpuaction->lid; + action0.capture_ltype = kpuaction->ltype; + action0.capture_flags = kpuaction->flags; + action0.ptr_advance = kpuaction->ptr_advance; + action0.var_len_offset = kpuaction->offset; + action0.var_len_mask = kpuaction->mask; + action0.var_len_right = kpuaction->right; + action0.var_len_shift = kpuaction->shift; + + if (pkind) + reg = NPC_AF_PKINDX_ACTION0(entry); + else + reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry); + + rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0); +} + +static void npc_config_kpucam(struct rvu *rvu, int blkaddr, + struct npc_kpu_profile_cam *kpucam, + int kpu, int entry) +{ + struct npc_kpu_cam cam0 = {0}; + struct npc_kpu_cam cam1 = {0}; + + cam1.state = kpucam->state & kpucam->state_mask; + cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask; + cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask; + cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask; + + cam0.state = ~kpucam->state & kpucam->state_mask; + cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask; + cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask; + cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask; + + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0); + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1); +} + +static inline u64 enable_mask(int count) +{ + return (((count) < 64) ? 
~(BIT_ULL(count) - 1) : (0x00ULL)); +} + +static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu, + struct npc_kpu_profile *profile) +{ + int entry, num_entries, max_entries; + + if (profile->cam_entries != profile->action_entries) { + dev_err(rvu->dev, + "KPU%d: CAM and action entries [%d != %d] not equal\n", + kpu, profile->cam_entries, profile->action_entries); + } + + max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF; + + /* Program CAM match entries for previous KPU extracted data */ + num_entries = min_t(int, profile->cam_entries, max_entries); + for (entry = 0; entry < num_entries; entry++) + npc_config_kpucam(rvu, blkaddr, + &profile->cam[entry], kpu, entry); + + /* Program this KPU's actions */ + num_entries = min_t(int, profile->action_entries, max_entries); + for (entry = 0; entry < num_entries; entry++) + npc_config_kpuaction(rvu, blkaddr, &profile->action[entry], + kpu, entry, false); + + /* Enable all programmed entries */ + num_entries = min_t(int, profile->action_entries, profile->cam_entries); + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries)); + if (num_entries > 64) { + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRY_DISX(kpu, 1), + enable_mask(num_entries - 64)); + } + + /* Enable this KPU */ + rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01); +} + +static void npc_parser_profile_init(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + int num_pkinds, num_kpus, idx; + struct npc_pkind *pkind; + + /* Get HW limits */ + hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F; + + /* Disable all KPUs and their entries */ + for (idx = 0; idx < hw->npc_kpus; idx++) { + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL); + rvu_write64(rvu, blkaddr, + NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL); + rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00); + } + + /* First program IKPU profile i.e PKIND configs. + * Check HW max count to avoid configuring junk or + * writing to unsupported CSR addresses. + */ + pkind = &hw->pkind; + num_pkinds = ARRAY_SIZE(ikpu_action_entries); + num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds); + + for (idx = 0; idx < num_pkinds; idx++) + npc_config_kpuaction(rvu, blkaddr, + &ikpu_action_entries[idx], 0, idx, true); + + /* Program KPU CAM and Action profiles */ + num_kpus = ARRAY_SIZE(npc_kpu_profiles); + num_kpus = min_t(int, hw->npc_kpus, num_kpus); + + for (idx = 0; idx < num_kpus; idx++) + npc_program_kpu_profile(rvu, blkaddr, + idx, &npc_kpu_profiles[idx]); +} + +static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) +{ + int nixlf_count = rvu_get_nixlf_count(rvu); + struct npc_mcam *mcam = &rvu->hw->mcam; + int rsvd; + u64 cfg; + + /* Get HW limits */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); + mcam->banks = (cfg >> 44) & 0xF; + mcam->banksize = (cfg >> 28) & 0xFFFF; + + /* Actual number of MCAM entries vary by entry size */ + cfg = (rvu_read64(rvu, blkaddr, + NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07; + mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize; + mcam->keysize = cfg; + + /* Number of banks combined per MCAM entry */ + if (cfg == NPC_MCAM_KEY_X4) + mcam->banks_per_entry = 4; + else if (cfg == NPC_MCAM_KEY_X2) + mcam->banks_per_entry = 2; + else + mcam->banks_per_entry = 1; + + /* Reserve one MCAM entry for each of the NIX LF to + * guarantee space to install default matching DMAC rule. 
+ * Also reserve 2 MCAM entries for each PF for default + * channel based matching or 'bcast & promisc' matching to + * support BCAST and PROMISC modes of operation for PFs. + * PF0 is excluded. + */ + rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) + + ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF); + if (mcam->total_entries <= rsvd) { + dev_warn(rvu->dev, + "Insufficient NPC MCAM size %d for pkt I/O, exiting\n", + mcam->total_entries); + return -ENOMEM; + } + + mcam->entries = mcam->total_entries - rsvd; + mcam->nixlf_offset = mcam->entries; + mcam->pf_offset = mcam->nixlf_offset + nixlf_count; + + spin_lock_init(&mcam->lock); + + return 0; +} + +int rvu_npc_init(struct rvu *rvu) +{ + struct npc_pkind *pkind = &rvu->hw->pkind; + u64 keyz = NPC_MCAM_KEY_X2; + int blkaddr, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) { + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); + return -ENODEV; + } + + /* Allocate resource bimap for pkind*/ + pkind->rsrc.max = (rvu_read64(rvu, blkaddr, + NPC_AF_CONST1) >> 12) & 0xFF; + err = rvu_alloc_bitmap(&pkind->rsrc); + if (err) + return err; + + /* Allocate mem for pkind to PF and channel mapping info */ + pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max, + sizeof(u32), GFP_KERNEL); + if (!pkind->pfchan_map) + return -ENOMEM; + + /* Configure KPU profile */ + npc_parser_profile_init(rvu, blkaddr); + + /* Config Outer L2, IPv4's NPC layer info */ + rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2, + (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4, + (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F); + + /* Enable below for Rx pkts. + * - Outer IPv4 header checksum validation. + * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M]. + */ + rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG, + rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) | + BIT_ULL(6) | BIT_ULL(2)); + + /* Set RX and TX side MCAM search key size. + * Also enable parse key extract nibbles suchthat except + * layer E to H, rest of the key is included for MCAM search. + */ + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), + ((keyz & 0x3) << 32) | ((1ULL << 20) - 1)); + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), + ((keyz & 0x3) << 32) | ((1ULL << 20) - 1)); + + err = npc_mcam_rsrcs_init(rvu, blkaddr); + if (err) + return err; + + /* Config packet data and flags extraction into PARSE result */ + npc_config_ldata_extract(rvu, blkaddr); + + /* Set TX miss action to UCAST_DEFAULT i.e + * transmit the packet on NIX LF SQ's default channel. + */ + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX), + NIX_TX_ACTIONOP_UCAST_DEFAULT); + + /* If MCAM lookup doesn't result in a match, drop the received packet */ + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX), + NIX_RX_ACTIONOP_DROP); + + return 0; +} + +void rvu_npc_freemem(struct rvu *rvu) +{ + struct npc_pkind *pkind = &rvu->hw->pkind; + + kfree(pkind->rsrc.bmap); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c new file mode 100644 index 000000000000..9d7c135c7965 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "common.h" +#include "mbox.h" +#include "rvu.h" + +struct reg_range { + u64 start; + u64 end; +}; + +struct hw_reg_map { + u8 regblk; + u8 num_ranges; + u64 mask; +#define MAX_REG_RANGES 8 + struct reg_range range[MAX_REG_RANGES]; +}; + +static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = { + {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } }, + {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18}, + {0x1200, 0x12E0} } }, + {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, + {0x1610, 0x1618} } }, + {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } }, + {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } }, +}; + +bool rvu_check_valid_reg(int regmap, int regblk, u64 reg) +{ + int idx; + struct hw_reg_map *map; + + /* Only 64bit offsets */ + if (reg & 0x07) + return false; + + if (regmap == TXSCHQ_HWREGMAP) { + if (regblk >= NIX_TXSCH_LVL_CNT) + return false; + map = &txsch_reg_map[regblk]; + } else { + return false; + } + + /* Should never happen */ + if (map->regblk != regblk) + return false; + + reg &= map->mask; + + for (idx = 0; idx < map->num_ranges; idx++) { + if (reg >= map->range[idx].start && + reg < map->range[idx].end) + return true; + } + return false; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h new file mode 100644 index 000000000000..09a8d61f3144 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -0,0 +1,502 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef RVU_REG_H +#define RVU_REG_H + +/* Admin function registers */ +#define RVU_AF_MSIXTR_BASE (0x10) +#define RVU_AF_ECO (0x20) +#define RVU_AF_BLK_RST (0x30) +#define RVU_AF_PF_BAR4_ADDR (0x40) +#define RVU_AF_RAS (0x100) +#define RVU_AF_RAS_W1S (0x108) +#define RVU_AF_RAS_ENA_W1S (0x110) +#define RVU_AF_RAS_ENA_W1C (0x118) +#define RVU_AF_GEN_INT (0x120) +#define RVU_AF_GEN_INT_W1S (0x128) +#define RVU_AF_GEN_INT_ENA_W1S (0x130) +#define RVU_AF_GEN_INT_ENA_W1C (0x138) +#define RVU_AF_AFPF_MBOX0 (0x02000) +#define RVU_AF_AFPF_MBOX1 (0x02008) +#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3) +#define RVU_AF_PFME_STATUS (0x2800) +#define RVU_AF_PFTRPEND (0x2810) +#define RVU_AF_PFTRPEND_W1S (0x2820) +#define RVU_AF_PF_RST (0x2840) +#define RVU_AF_HWVF_RST (0x2850) +#define RVU_AF_PFAF_MBOX_INT (0x2880) +#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888) +#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890) +#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898) +#define RVU_AF_PFFLR_INT (0x28a0) +#define RVU_AF_PFFLR_INT_W1S (0x28a8) +#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0) +#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8) +#define RVU_AF_PFME_INT (0x28c0) +#define RVU_AF_PFME_INT_W1S (0x28c8) +#define RVU_AF_PFME_INT_ENA_W1S (0x28d0) +#define RVU_AF_PFME_INT_ENA_W1C (0x28d8) + +/* Admin function's privileged PF/VF registers */ +#define RVU_PRIV_CONST (0x8000000) +#define RVU_PRIV_GEN_CFG (0x8000010) +#define RVU_PRIV_CLK_CFG (0x8000020) +#define RVU_PRIV_ACTIVE_PC (0x8000030) +#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16) +#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16) +#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16) +#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16) +#define RVU_PRIV_PFX_NIX0_CFG (0x8000300) +#define RVU_PRIV_PFX_NPA_CFG (0x8000310) +#define RVU_PRIV_PFX_SSO_CFG (0x8000320) +#define RVU_PRIV_PFX_SSOW_CFG (0x8000330) +#define RVU_PRIV_PFX_TIM_CFG (0x8000340) +#define RVU_PRIV_PFX_CPT0_CFG (0x8000350) +#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3) +#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16) +#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300) +#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310) +#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320) +#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330) +#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340) +#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350) + +/* RVU PF registers */ +#define RVU_PF_VFX_PFVF_MBOX0 (0x00000) +#define RVU_PF_VFX_PFVF_MBOX1 (0x00008) +#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3) +#define RVU_PF_VF_BAR4_ADDR (0x10) +#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3) +#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3) +#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3) +#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3) +#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3) +#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3) +#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3) +#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3) +#define RVU_PF_PFAF_MBOX0 (0xC00) +#define RVU_PF_PFAF_MBOX1 (0xC08) +#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) 
<< 3) +#define RVU_PF_INT (0xc20) +#define RVU_PF_INT_W1S (0xc28) +#define RVU_PF_INT_ENA_W1S (0xc30) +#define RVU_PF_INT_ENA_W1C (0xc38) +#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) +#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) +#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) + +/* RVU VF registers */ +#define RVU_VF_VFPF_MBOX0 (0x00000) +#define RVU_VF_VFPF_MBOX1 (0x00008) + +/* NPA block's admin function registers */ +#define NPA_AF_BLK_RST (0x0000) +#define NPA_AF_CONST (0x0010) +#define NPA_AF_CONST1 (0x0018) +#define NPA_AF_LF_RST (0x0020) +#define NPA_AF_GEN_CFG (0x0030) +#define NPA_AF_NDC_CFG (0x0040) +#define NPA_AF_INP_CTL (0x00D0) +#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0) +#define NPA_AF_AVG_DELAY (0x0100) +#define NPA_AF_GEN_INT (0x0140) +#define NPA_AF_GEN_INT_W1S (0x0148) +#define NPA_AF_GEN_INT_ENA_W1S (0x0150) +#define NPA_AF_GEN_INT_ENA_W1C (0x0158) +#define NPA_AF_RVU_INT (0x0160) +#define NPA_AF_RVU_INT_W1S (0x0168) +#define NPA_AF_RVU_INT_ENA_W1S (0x0170) +#define NPA_AF_RVU_INT_ENA_W1C (0x0178) +#define NPA_AF_ERR_INT (0x0180) +#define NPA_AF_ERR_INT_W1S (0x0188) +#define NPA_AF_ERR_INT_ENA_W1S (0x0190) +#define NPA_AF_ERR_INT_ENA_W1C (0x0198) +#define NPA_AF_RAS (0x01A0) +#define NPA_AF_RAS_W1S (0x01A8) +#define NPA_AF_RAS_ENA_W1S (0x01B0) +#define NPA_AF_RAS_ENA_W1C (0x01B8) +#define NPA_AF_BP_TEST (0x0200) +#define NPA_AF_ECO (0x0300) +#define NPA_AF_AQ_CFG (0x0600) +#define NPA_AF_AQ_BASE (0x0610) +#define NPA_AF_AQ_STATUS (0x0620) +#define NPA_AF_AQ_DOOR (0x0630) +#define NPA_AF_AQ_DONE_WAIT (0x0640) +#define NPA_AF_AQ_DONE (0x0650) +#define NPA_AF_AQ_DONE_ACK (0x0660) +#define NPA_AF_AQ_DONE_INT (0x0680) +#define NPA_AF_AQ_DONE_INT_W1S (0x0688) +#define NPA_AF_AQ_DONE_ENA_W1S (0x0690) +#define NPA_AF_AQ_DONE_ENA_W1C (0x0698) +#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18) +#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18) +#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18) +#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18) +#define NPA_PRIV_AF_INT_CFG (0x10000) +#define NPA_PRIV_LFX_CFG (0x10010) +#define NPA_PRIV_LFX_INT_CFG (0x10020) +#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030) + +/* NIX block's admin function registers */ +#define NIX_AF_CFG (0x0000) +#define NIX_AF_STATUS (0x0010) +#define NIX_AF_NDC_CFG (0x0018) +#define NIX_AF_CONST (0x0020) +#define NIX_AF_CONST1 (0x0028) +#define NIX_AF_CONST2 (0x0030) +#define NIX_AF_CONST3 (0x0038) +#define NIX_AF_SQ_CONST (0x0040) +#define NIX_AF_CQ_CONST (0x0048) +#define NIX_AF_RQ_CONST (0x0050) +#define NIX_AF_PSE_CONST (0x0060) +#define NIX_AF_TL1_CONST (0x0070) +#define NIX_AF_TL2_CONST (0x0078) +#define NIX_AF_TL3_CONST (0x0080) +#define NIX_AF_TL4_CONST (0x0088) +#define NIX_AF_MDQ_CONST (0x0090) +#define NIX_AF_MC_MIRROR_CONST (0x0098) +#define NIX_AF_LSO_CFG (0x00A8) +#define NIX_AF_BLK_RST (0x00B0) +#define NIX_AF_TX_TSTMP_CFG (0x00C0) +#define NIX_AF_RX_CFG (0x00D0) +#define NIX_AF_AVG_DELAY (0x00E0) +#define NIX_AF_CINT_DELAY (0x00F0) +#define NIX_AF_RX_MCAST_BASE (0x0100) +#define NIX_AF_RX_MCAST_CFG (0x0110) +#define NIX_AF_RX_MCAST_BUF_BASE (0x0120) +#define NIX_AF_RX_MCAST_BUF_CFG (0x0130) +#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140) +#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148) +#define NIX_AF_LF_RST (0x0150) +#define NIX_AF_GEN_INT (0x0160) +#define NIX_AF_GEN_INT_W1S (0x0168) +#define NIX_AF_GEN_INT_ENA_W1S (0x0170) +#define NIX_AF_GEN_INT_ENA_W1C (0x0178) +#define NIX_AF_ERR_INT (0x0180) +#define NIX_AF_ERR_INT_W1S (0x0188) +#define NIX_AF_ERR_INT_ENA_W1S (0x0190) 
+#define NIX_AF_ERR_INT_ENA_W1C (0x0198) +#define NIX_AF_RAS (0x01A0) +#define NIX_AF_RAS_W1S (0x01A8) +#define NIX_AF_RAS_ENA_W1S (0x01B0) +#define NIX_AF_RAS_ENA_W1C (0x01B8) +#define NIX_AF_RVU_INT (0x01C0) +#define NIX_AF_RVU_INT_W1S (0x01C8) +#define NIX_AF_RVU_INT_ENA_W1S (0x01D0) +#define NIX_AF_RVU_INT_ENA_W1C (0x01D8) +#define NIX_AF_TCP_TIMER (0x01E0) +#define NIX_AF_RX_WQE_TAG_CTL (0x01F0) +#define NIX_AF_RX_DEF_OL2 (0x0200) +#define NIX_AF_RX_DEF_OIP4 (0x0210) +#define NIX_AF_RX_DEF_IIP4 (0x0220) +#define NIX_AF_RX_DEF_OIP6 (0x0230) +#define NIX_AF_RX_DEF_IIP6 (0x0240) +#define NIX_AF_RX_DEF_OTCP (0x0250) +#define NIX_AF_RX_DEF_ITCP (0x0260) +#define NIX_AF_RX_DEF_OUDP (0x0270) +#define NIX_AF_RX_DEF_IUDP (0x0280) +#define NIX_AF_RX_DEF_OSCTP (0x0290) +#define NIX_AF_RX_DEF_ISCTP (0x02A0) +#define NIX_AF_RX_DEF_IPSECX (0x02B0) +#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300) +#define NIX_AF_RX_CPTX_INST_ADDR (0x0310) +#define NIX_AF_NDC_TX_SYNC (0x03F0) +#define NIX_AF_AQ_CFG (0x0400) +#define NIX_AF_AQ_BASE (0x0410) +#define NIX_AF_AQ_STATUS (0x0420) +#define NIX_AF_AQ_DOOR (0x0430) +#define NIX_AF_AQ_DONE_WAIT (0x0440) +#define NIX_AF_AQ_DONE (0x0450) +#define NIX_AF_AQ_DONE_ACK (0x0460) +#define NIX_AF_AQ_DONE_TIMER (0x0470) +#define NIX_AF_AQ_DONE_INT (0x0480) +#define NIX_AF_AQ_DONE_INT_W1S (0x0488) +#define NIX_AF_AQ_DONE_ENA_W1S (0x0490) +#define NIX_AF_AQ_DONE_ENA_W1C (0x0498) +#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500) +#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510) +#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520) +#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530) +#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16) +#define NIX_AF_RX_SW_SYNC (0x0550) +#define NIX_AF_RX_SW_SYNC_DONE (0x0560) +#define NIX_AF_SEB_ECO (0x0600) +#define NIX_AF_SEB_TEST_BP (0x0610) +#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620) +#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630) +#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640) +#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660) +#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670) + +#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3) +#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) +#define NIX_AF_PSE_CHANNEL_LEVEL (0x800) +#define NIX_AF_PSE_SHAPER_CFG (0x810) +#define NIX_AF_TX_EXPR_CREDIT (0x830) +#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18) +#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16) +#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16) +#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16) +#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16) +#define NIX_AF_SDP_LINK_CREDIT (0xa40) +#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3) +#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3) +#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16) +#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16) +#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16) +#define NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16) +#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16) +#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16) +#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16) +#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16) +#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16) +#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16) +#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16) +#define NIX_AF_TL1A_DEBUG (0xce0) +#define NIX_AF_TL1B_DEBUG (0xcf0) +#define 
NIX_AF_TL1_DEBUG_GREEN (0xd00) +#define NIX_AF_TL1_DEBUG_NODE (0xd10) +#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16) +#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16) +#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16) +#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16) +#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16) +#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16) +#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16) +#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16) +#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16) +#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16) +#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16) +#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16) +#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16) +#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16) +#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16) +#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16) +#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16) +#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16) +#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16) +#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16) +#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16) +#define NIX_AF_TL2A_DEBUG (0xee0) +#define NIX_AF_TL2B_DEBUG (0xef0) +#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) +#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16) +#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16) +#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16) +#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16) +#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16) +#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16) +#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16) +#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16) +#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16) +#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16) +#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16) +#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16) +#define NIX_AF_TL3A_DEBUG (0x10e0) +#define NIX_AF_TL3B_DEBUG (0x10f0) +#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) +#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16) +#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16) +#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16) +#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16) +#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16) +#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16) +#define NIX_AF_TL4X_SW_XOFF(a) (0x1270 | (a) << 16) +#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16) +#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) +#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16) +#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16) +#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16) +#define NIX_AF_TL4A_DEBUG (0x12e0) +#define NIX_AF_TL4B_DEBUG (0x12f0) +#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) +#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16) +#define 
NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16) +#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16) +#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16) +#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16) +#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16) +#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16) +#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) +#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16) +#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16) +#define NIX_AF_MDQA_DEBUG (0x14e0) +#define NIX_AF_MDQB_DEBUG (0x14f0) +#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18) +#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16) +#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) +#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3) +#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15) +#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16) +#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16) +#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17) +#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15) +#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18) +#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3) +#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17) +#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17) +#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17) +#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17) +#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17) +#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17) +#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17) +#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17) +#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17) +#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17) +#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17) +#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17) +#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17) +#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17) +#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17) +#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17) +#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17) +#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17) +#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3) +#define NIX_AF_RX_NPC_MC_RCV (0x4700) +#define NIX_AF_RX_NPC_MC_DROP (0x4710) +#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720) +#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730) +#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16) + +#define NIX_PRIV_AF_INT_CFG (0x8000000) +#define NIX_PRIV_LFX_CFG (0x8000010) +#define NIX_PRIV_LFX_INT_CFG (0x8000020) +#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030) + +/* SSO */ +#define SSO_AF_CONST (0x1000) +#define SSO_AF_CONST1 (0x1008) +#define SSO_AF_BLK_RST (0x10f8) +#define SSO_AF_LF_HWGRP_RST (0x10e0) +#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800) +#define SSO_PRIV_LFX_HWGRP_CFG (0x10000) +#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000) + +/* SSOW */ +#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010) +#define SSOW_AF_LF_HWS_RST 
(0x0030) +#define SSOW_PRIV_LFX_HWS_CFG (0x1000) +#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000) + +/* TIM */ +#define TIM_AF_CONST (0x90) +#define TIM_PRIV_LFX_CFG (0x20000) +#define TIM_PRIV_LFX_INT_CFG (0x24000) +#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000) +#define TIM_AF_BLK_RST (0x10) +#define TIM_AF_LF_RST (0x20) + +/* CPT */ +#define CPT_AF_CONSTANTS0 (0x0000) +#define CPT_PRIV_LFX_CFG (0x41000) +#define CPT_PRIV_LFX_INT_CFG (0x43000) +#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000) +#define CPT_AF_LF_RST (0x44000) +#define CPT_AF_BLK_RST (0x46000) + +#define NDC_AF_BLK_RST (0x002F0) +#define NPC_AF_BLK_RST (0x00040) + +/* NPC */ +#define NPC_AF_CFG (0x00000) +#define NPC_AF_ACTIVE_PC (0x00010) +#define NPC_AF_CONST (0x00020) +#define NPC_AF_CONST1 (0x00030) +#define NPC_AF_BLK_RST (0x00040) +#define NPC_AF_MCAM_SCRUB_CTL (0x000a0) +#define NPC_AF_KCAM_SCRUB_CTL (0x000b0) +#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3) +#define NPC_AF_PCK_CFG (0x00600) +#define NPC_AF_PCK_DEF_OL2 (0x00610) +#define NPC_AF_PCK_DEF_OIP4 (0x00620) +#define NPC_AF_PCK_DEF_OIP6 (0x00630) +#define NPC_AF_PCK_DEF_IIP4 (0x00640) +#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3) +#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8) +#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6) +#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6) +#define NPC_AF_PKINDX_CPI_DEFX(a, b) (0x80020ull | (a) << 6 | (b) << 3) +#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \ + (0x100000 | (a) << 14 | (b) << 6 | (c) << 3) +#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \ + (0x100020 | (a) << 14 | (b) << 6) +#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \ + (0x100028 | (a) << 14 | (b) << 6) +#define NPC_AF_KPUX_ENTRY_DISX(a, b) (0x180000 | (a) << 6 | (b) << 3) +#define NPC_AF_CPIX_CFG(a) (0x200000 | (a) << 3) +#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \ + (0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3) +#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \ + (0x980000 | (a) << 16 | (b) << 12 | (c) << 3) +#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \ + (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3) +#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \ + (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3) +#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \ + (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3) +#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4) +#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \ + (0x1880000 | (a) << 8 | (b) << 4) +#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8) +#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8) +#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4) +#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \ + (0x1900008 | (a) << 8 | (b) << 4) +#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4) +#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4) +#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4) +#define NPC_AF_LKUP_CTL (0x2000000) +#define NPC_AF_LKUP_DATAX(a) (0x2000200 | (a) << 4) +#define NPC_AF_LKUP_RESULTX(a) (0x2000400 | (a) << 4) +#define NPC_AF_INTFX_STAT(a) (0x2000800 | (a) << 4) +#define NPC_AF_DBG_CTL (0x3000000) +#define NPC_AF_DBG_STATUS (0x3000010) +#define NPC_AF_KPUX_DBG(a) (0x3000020 | (a) << 8) +#define NPC_AF_IKPU_ERR_CTL (0x3000080) +#define NPC_AF_KPUX_ERR_CTL(a) (0x30000a0 | (a) << 8) +#define NPC_AF_MCAM_DBG (0x3001000) +#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4) +#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4) + +#endif /* RVU_REG_H */ diff --git 
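An illustrative aside on the rvu_reg.h block above (not part of the patch): every indexed accessor encodes the register's base offset in the low bits and shifts the index (LF, MDQ, bank, ...) into higher bits, so each macro expands to a plain constant expression that the driver's register helpers can add to the block's mapped base. A minimal user-space sketch, with two of the macros copied verbatim just for the example:

#include <stdio.h>

/* Copies of two accessors from rvu_reg.h above, reproduced for illustration. */
#define NIX_AF_MDQX_CIR(a)	(0x1420 | (a) << 16)
#define NIX_AF_LFX_CFG(a)	(0x4000 | (a) << 17)

int main(void)
{
	/* '<<' binds tighter than '|', so the index lands above the base offset:
	 *   MDQ 2 -> 0x1420 | (2 << 16) = 0x21420
	 *   LF  3 -> 0x4000 | (3 << 17) = 0x64000
	 */
	printf("NIX_AF_MDQX_CIR(2) = 0x%x\n", NIX_AF_MDQX_CIR(2));
	printf("NIX_AF_LFX_CFG(3)  = 0x%x\n", NIX_AF_LFX_CFG(3));
	return 0;
}

How the resulting offset is combined with the AF BAR is up to the driver's read/write helpers (not shown here); the point of the sketch is only the offset arithmetic the macros encode.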
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h new file mode 100644 index 000000000000..f920dac74e6c --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -0,0 +1,917 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef RVU_STRUCT_H +#define RVU_STRUCT_H + +/* RVU Block Address Enumeration */ +enum rvu_block_addr_e { + BLKADDR_RVUM = 0x0ULL, + BLKADDR_LMT = 0x1ULL, + BLKADDR_MSIX = 0x2ULL, + BLKADDR_NPA = 0x3ULL, + BLKADDR_NIX0 = 0x4ULL, + BLKADDR_NIX1 = 0x5ULL, + BLKADDR_NPC = 0x6ULL, + BLKADDR_SSO = 0x7ULL, + BLKADDR_SSOW = 0x8ULL, + BLKADDR_TIM = 0x9ULL, + BLKADDR_CPT0 = 0xaULL, + BLKADDR_CPT1 = 0xbULL, + BLKADDR_NDC0 = 0xcULL, + BLKADDR_NDC1 = 0xdULL, + BLKADDR_NDC2 = 0xeULL, + BLK_COUNT = 0xfULL, +}; + +/* RVU Block Type Enumeration */ +enum rvu_block_type_e { + BLKTYPE_RVUM = 0x0, + BLKTYPE_MSIX = 0x1, + BLKTYPE_LMT = 0x2, + BLKTYPE_NIX = 0x3, + BLKTYPE_NPA = 0x4, + BLKTYPE_NPC = 0x5, + BLKTYPE_SSO = 0x6, + BLKTYPE_SSOW = 0x7, + BLKTYPE_TIM = 0x8, + BLKTYPE_CPT = 0x9, + BLKTYPE_NDC = 0xa, + BLKTYPE_MAX = 0xa, +}; + +/* RVU Admin function Interrupt Vector Enumeration */ +enum rvu_af_int_vec_e { + RVU_AF_INT_VEC_POISON = 0x0, + RVU_AF_INT_VEC_PFFLR = 0x1, + RVU_AF_INT_VEC_PFME = 0x2, + RVU_AF_INT_VEC_GEN = 0x3, + RVU_AF_INT_VEC_MBOX = 0x4, + RVU_AF_INT_VEC_CNT = 0x5, +}; + +/** + * RVU PF Interrupt Vector Enumeration + */ +enum rvu_pf_int_vec_e { + RVU_PF_INT_VEC_VFFLR0 = 0x0, + RVU_PF_INT_VEC_VFFLR1 = 0x1, + RVU_PF_INT_VEC_VFME0 = 0x2, + RVU_PF_INT_VEC_VFME1 = 0x3, + RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4, + RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5, + RVU_PF_INT_VEC_AFPF_MBOX = 0x6, + RVU_PF_INT_VEC_CNT = 0x7, +}; + +/* NPA admin queue completion enumeration */ +enum npa_aq_comp { + NPA_AQ_COMP_NOTDONE = 0x0, + NPA_AQ_COMP_GOOD = 0x1, + NPA_AQ_COMP_SWERR = 0x2, + NPA_AQ_COMP_CTX_POISON = 0x3, + NPA_AQ_COMP_CTX_FAULT = 0x4, + NPA_AQ_COMP_LOCKERR = 0x5, +}; + +/* NPA admin queue context types */ +enum npa_aq_ctype { + NPA_AQ_CTYPE_AURA = 0x0, + NPA_AQ_CTYPE_POOL = 0x1, +}; + +/* NPA admin queue instruction opcodes */ +enum npa_aq_instop { + NPA_AQ_INSTOP_NOP = 0x0, + NPA_AQ_INSTOP_INIT = 0x1, + NPA_AQ_INSTOP_WRITE = 0x2, + NPA_AQ_INSTOP_READ = 0x3, + NPA_AQ_INSTOP_LOCK = 0x4, + NPA_AQ_INSTOP_UNLOCK = 0x5, +}; + +/* NPA admin queue instruction structure */ +struct npa_aq_inst_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 doneint : 1; /* W0 */ + u64 reserved_44_62 : 19; + u64 cindex : 20; + u64 reserved_17_23 : 7; + u64 lf : 9; + u64 ctype : 4; + u64 op : 4; +#else + u64 op : 4; + u64 ctype : 4; + u64 lf : 9; + u64 reserved_17_23 : 7; + u64 cindex : 20; + u64 reserved_44_62 : 19; + u64 doneint : 1; +#endif + u64 res_addr; /* W1 */ +}; + +/* NPA admin queue result structure */ +struct npa_aq_res_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_17_63 : 47; /* W0 */ + u64 doneint : 1; + u64 compcode : 8; + u64 ctype : 4; + u64 op : 4; +#else + u64 op : 4; + u64 ctype : 4; + u64 compcode : 8; + u64 doneint : 1; + u64 reserved_17_63 : 47; +#endif + u64 reserved_64_127; /* W1 */ +}; + +struct npa_aura_s { + u64 pool_addr; /* W0 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 avg_level : 8; + u64 reserved_118_119 : 
2; + u64 shift : 6; + u64 aura_drop : 8; + u64 reserved_98_103 : 6; + u64 bp_ena : 2; + u64 aura_drop_ena : 1; + u64 pool_drop_ena : 1; + u64 reserved_93 : 1; + u64 avg_con : 9; + u64 pool_way_mask : 16; + u64 pool_caching : 1; + u64 reserved_65 : 2; + u64 ena : 1; +#else + u64 ena : 1; + u64 reserved_65 : 2; + u64 pool_caching : 1; + u64 pool_way_mask : 16; + u64 avg_con : 9; + u64 reserved_93 : 1; + u64 pool_drop_ena : 1; + u64 aura_drop_ena : 1; + u64 bp_ena : 2; + u64 reserved_98_103 : 6; + u64 aura_drop : 8; + u64 shift : 6; + u64 reserved_118_119 : 2; + u64 avg_level : 8; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 reserved_189_191 : 3; + u64 nix1_bpid : 9; + u64 reserved_177_179 : 3; + u64 nix0_bpid : 9; + u64 reserved_164_167 : 4; + u64 count : 36; +#else + u64 count : 36; + u64 reserved_164_167 : 4; + u64 nix0_bpid : 9; + u64 reserved_177_179 : 3; + u64 nix1_bpid : 9; + u64 reserved_189_191 : 3; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 reserved_252_255 : 4; + u64 fc_hyst_bits : 4; + u64 fc_stype : 2; + u64 fc_up_crossing : 1; + u64 fc_ena : 1; + u64 reserved_240_243 : 4; + u64 bp : 8; + u64 reserved_228_231 : 4; + u64 limit : 36; +#else + u64 limit : 36; + u64 reserved_228_231 : 4; + u64 bp : 8; + u64 reserved_240_243 : 4; + u64 fc_ena : 1; + u64 fc_up_crossing : 1; + u64 fc_stype : 2; + u64 fc_hyst_bits : 4; + u64 reserved_252_255 : 4; +#endif + u64 fc_addr; /* W4 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */ + u64 reserved_379_383 : 5; + u64 err_qint_idx : 7; + u64 reserved_371 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_363 : 1; + u64 thresh_up : 1; + u64 thresh_int_ena : 1; + u64 thresh_int : 1; + u64 err_int_ena : 8; + u64 err_int : 8; + u64 update_time : 16; + u64 pool_drop : 8; +#else + u64 pool_drop : 8; + u64 update_time : 16; + u64 err_int : 8; + u64 err_int_ena : 8; + u64 thresh_int : 1; + u64 thresh_int_ena : 1; + u64 thresh_up : 1; + u64 reserved_363 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_371 : 1; + u64 err_qint_idx : 7; + u64 reserved_379_383 : 5; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */ + u64 reserved_420_447 : 28; + u64 thresh : 36; +#else + u64 thresh : 36; + u64 reserved_420_447 : 28; +#endif + u64 reserved_448_511; /* W7 */ +}; + +struct npa_pool_s { + u64 stack_base; /* W0 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 reserved_115_127 : 13; + u64 buf_size : 11; + u64 reserved_100_103 : 4; + u64 buf_offset : 12; + u64 stack_way_mask : 16; + u64 reserved_70_71 : 3; + u64 stack_caching : 1; + u64 reserved_66_67 : 2; + u64 nat_align : 1; + u64 ena : 1; +#else + u64 ena : 1; + u64 nat_align : 1; + u64 reserved_66_67 : 2; + u64 stack_caching : 1; + u64 reserved_70_71 : 3; + u64 stack_way_mask : 16; + u64 buf_offset : 12; + u64 reserved_100_103 : 4; + u64 buf_size : 11; + u64 reserved_115_127 : 13; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 stack_pages : 32; + u64 stack_max_pages : 32; +#else + u64 stack_max_pages : 32; + u64 stack_pages : 32; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 reserved_240_255 : 16; + u64 op_pc : 48; +#else + u64 op_pc : 48; + u64 reserved_240_255 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */ + u64 reserved_316_319 : 4; + u64 update_time : 16; + u64 reserved_297_299 : 3; + u64 fc_up_crossing : 1; + u64 fc_hyst_bits : 4; + u64 fc_stype : 2; + u64 fc_ena : 1; + u64 avg_con : 9; + u64 avg_level : 8; + u64 reserved_270_271 : 2; + u64 shift : 6; + u64 reserved_260_263 : 4; + u64 stack_offset : 4; +#else + u64 stack_offset : 4; + u64 
reserved_260_263 : 4; + u64 shift : 6; + u64 reserved_270_271 : 2; + u64 avg_level : 8; + u64 avg_con : 9; + u64 fc_ena : 1; + u64 fc_stype : 2; + u64 fc_hyst_bits : 4; + u64 fc_up_crossing : 1; + u64 reserved_297_299 : 3; + u64 update_time : 16; + u64 reserved_316_319 : 4; +#endif + u64 fc_addr; /* W5 */ + u64 ptr_start; /* W6 */ + u64 ptr_end; /* W7 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */ + u64 reserved_571_575 : 5; + u64 err_qint_idx : 7; + u64 reserved_563 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_555 : 1; + u64 thresh_up : 1; + u64 thresh_int_ena : 1; + u64 thresh_int : 1; + u64 err_int_ena : 8; + u64 err_int : 8; + u64 reserved_512_535 : 24; +#else + u64 reserved_512_535 : 24; + u64 err_int : 8; + u64 err_int_ena : 8; + u64 thresh_int : 1; + u64 thresh_int_ena : 1; + u64 thresh_up : 1; + u64 reserved_555 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_563 : 1; + u64 err_qint_idx : 7; + u64 reserved_571_575 : 5; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ + u64 reserved_612_639 : 28; + u64 thresh : 36; +#else + u64 thresh : 36; + u64 reserved_612_639 : 28; +#endif + u64 reserved_640_703; /* W10 */ + u64 reserved_704_767; /* W11 */ + u64 reserved_768_831; /* W12 */ + u64 reserved_832_895; /* W13 */ + u64 reserved_896_959; /* W14 */ + u64 reserved_960_1023; /* W15 */ +}; + +/* NIX admin queue completion status */ +enum nix_aq_comp { + NIX_AQ_COMP_NOTDONE = 0x0, + NIX_AQ_COMP_GOOD = 0x1, + NIX_AQ_COMP_SWERR = 0x2, + NIX_AQ_COMP_CTX_POISON = 0x3, + NIX_AQ_COMP_CTX_FAULT = 0x4, + NIX_AQ_COMP_LOCKERR = 0x5, + NIX_AQ_COMP_SQB_ALLOC_FAIL = 0x6, +}; + +/* NIX admin queue context types */ +enum nix_aq_ctype { + NIX_AQ_CTYPE_RQ = 0x0, + NIX_AQ_CTYPE_SQ = 0x1, + NIX_AQ_CTYPE_CQ = 0x2, + NIX_AQ_CTYPE_MCE = 0x3, + NIX_AQ_CTYPE_RSS = 0x4, + NIX_AQ_CTYPE_DYNO = 0x5, +}; + +/* NIX admin queue instruction opcodes */ +enum nix_aq_instop { + NIX_AQ_INSTOP_NOP = 0x0, + NIX_AQ_INSTOP_INIT = 0x1, + NIX_AQ_INSTOP_WRITE = 0x2, + NIX_AQ_INSTOP_READ = 0x3, + NIX_AQ_INSTOP_LOCK = 0x4, + NIX_AQ_INSTOP_UNLOCK = 0x5, +}; + +/* NIX admin queue instruction structure */ +struct nix_aq_inst_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 doneint : 1; /* W0 */ + u64 reserved_44_62 : 19; + u64 cindex : 20; + u64 reserved_15_23 : 9; + u64 lf : 7; + u64 ctype : 4; + u64 op : 4; +#else + u64 op : 4; + u64 ctype : 4; + u64 lf : 7; + u64 reserved_15_23 : 9; + u64 cindex : 20; + u64 reserved_44_62 : 19; + u64 doneint : 1; +#endif + u64 res_addr; /* W1 */ +}; + +/* NIX admin queue result structure */ +struct nix_aq_res_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_17_63 : 47; /* W0 */ + u64 doneint : 1; + u64 compcode : 8; + u64 ctype : 4; + u64 op : 4; +#else + u64 op : 4; + u64 ctype : 4; + u64 compcode : 8; + u64 doneint : 1; + u64 reserved_17_63 : 47; +#endif + u64 reserved_64_127; /* W1 */ +}; + +/* NIX Completion queue context structure */ +struct nix_cq_ctx_s { + u64 base; +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 wrptr : 20; + u64 avg_con : 9; + u64 cint_idx : 7; + u64 cq_err : 1; + u64 qint_idx : 7; + u64 rsvd_81_83 : 3; + u64 bpid : 9; + u64 rsvd_69_71 : 3; + u64 bp_ena : 1; + u64 rsvd_64_67 : 4; +#else + u64 rsvd_64_67 : 4; + u64 bp_ena : 1; + u64 rsvd_69_71 : 3; + u64 bpid : 9; + u64 rsvd_81_83 : 3; + u64 qint_idx : 7; + u64 cq_err : 1; + u64 cint_idx : 7; + u64 avg_con : 9; + u64 wrptr : 20; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 update_time : 16; + u64 avg_level : 8; + u64 head : 20; + u64 tail : 20; +#else + u64 tail : 20; + u64 head : 20; + u64 avg_level : 
8; + u64 update_time : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 cq_err_int_ena : 8; + u64 cq_err_int : 8; + u64 qsize : 4; + u64 rsvd_233_235 : 3; + u64 caching : 1; + u64 substream : 20; + u64 rsvd_210_211 : 2; + u64 ena : 1; + u64 drop_ena : 1; + u64 drop : 8; + u64 dp : 8; +#else + u64 dp : 8; + u64 drop : 8; + u64 drop_ena : 1; + u64 ena : 1; + u64 rsvd_210_211 : 2; + u64 substream : 20; + u64 caching : 1; + u64 rsvd_233_235 : 3; + u64 qsize : 4; + u64 cq_err_int : 8; + u64 cq_err_int_ena : 8; +#endif +}; + +/* NIX Receive queue context structure */ +struct nix_rq_ctx_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 wqe_aura : 20; + u64 substream : 20; + u64 cq : 20; + u64 ena_wqwd : 1; + u64 ipsech_ena : 1; + u64 sso_ena : 1; + u64 ena : 1; +#else + u64 ena : 1; + u64 sso_ena : 1; + u64 ipsech_ena : 1; + u64 ena_wqwd : 1; + u64 cq : 20; + u64 substream : 20; + u64 wqe_aura : 20; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 rsvd_127_122 : 6; + u64 lpb_drop_ena : 1; + u64 spb_drop_ena : 1; + u64 xqe_drop_ena : 1; + u64 wqe_caching : 1; + u64 pb_caching : 2; + u64 sso_tt : 2; + u64 sso_grp : 10; + u64 lpb_aura : 20; + u64 spb_aura : 20; +#else + u64 spb_aura : 20; + u64 lpb_aura : 20; + u64 sso_grp : 10; + u64 sso_tt : 2; + u64 pb_caching : 2; + u64 wqe_caching : 1; + u64 xqe_drop_ena : 1; + u64 spb_drop_ena : 1; + u64 lpb_drop_ena : 1; + u64 rsvd_127_122 : 6; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 xqe_hdr_split : 1; + u64 xqe_imm_copy : 1; + u64 rsvd_189_184 : 6; + u64 xqe_imm_size : 6; + u64 later_skip : 6; + u64 rsvd_171 : 1; + u64 first_skip : 7; + u64 lpb_sizem1 : 12; + u64 spb_ena : 1; + u64 rsvd_150_148 : 3; + u64 wqe_skip : 2; + u64 spb_sizem1 : 6; + u64 rsvd_139_128 : 12; +#else + u64 rsvd_139_128 : 12; + u64 spb_sizem1 : 6; + u64 wqe_skip : 2; + u64 rsvd_150_148 : 3; + u64 spb_ena : 1; + u64 lpb_sizem1 : 12; + u64 first_skip : 7; + u64 rsvd_171 : 1; + u64 later_skip : 6; + u64 xqe_imm_size : 6; + u64 rsvd_189_184 : 6; + u64 xqe_imm_copy : 1; + u64 xqe_hdr_split : 1; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 spb_pool_pass : 8; + u64 spb_pool_drop : 8; + u64 spb_aura_pass : 8; + u64 spb_aura_drop : 8; + u64 wqe_pool_pass : 8; + u64 wqe_pool_drop : 8; + u64 xqe_pass : 8; + u64 xqe_drop : 8; +#else + u64 xqe_drop : 8; + u64 xqe_pass : 8; + u64 wqe_pool_drop : 8; + u64 wqe_pool_pass : 8; + u64 spb_aura_drop : 8; + u64 spb_aura_pass : 8; + u64 spb_pool_drop : 8; + u64 spb_pool_pass : 8; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */ + u64 rsvd_319_315 : 5; + u64 qint_idx : 7; + u64 rq_int_ena : 8; + u64 rq_int : 8; + u64 rsvd_291_288 : 4; + u64 lpb_pool_pass : 8; + u64 lpb_pool_drop : 8; + u64 lpb_aura_pass : 8; + u64 lpb_aura_drop : 8; +#else + u64 lpb_aura_drop : 8; + u64 lpb_aura_pass : 8; + u64 lpb_pool_drop : 8; + u64 lpb_pool_pass : 8; + u64 rsvd_291_288 : 4; + u64 rq_int : 8; + u64 rq_int_ena : 8; + u64 qint_idx : 7; + u64 rsvd_319_315 : 5; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */ + u64 rsvd_383_366 : 18; + u64 flow_tagw : 6; + u64 bad_utag : 8; + u64 good_utag : 8; + u64 ltag : 24; +#else + u64 ltag : 24; + u64 good_utag : 8; + u64 bad_utag : 8; + u64 flow_tagw : 6; + u64 rsvd_383_366 : 18; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */ + u64 rsvd_447_432 : 16; + u64 octs : 48; +#else + u64 octs : 48; + u64 rsvd_447_432 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */ + u64 rsvd_511_496 : 16; + u64 pkts : 48; +#else + u64 pkts : 48; + u64 rsvd_511_496 : 16; +#endif 
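/* (Illustrative aside, not part of the patch.)  Every context structure in
 * rvu_struct.h declares each 64-bit word twice, guarded by
 * __BIG_ENDIAN_BITFIELD, because C bitfields are allocated from the least
 * significant bit upward on little-endian ABIs and from the most
 * significant bit downward on big-endian ABIs.  Mirroring the declaration
 * order keeps a named field on the same hardware bits either way.  Minimal
 * sketch under that assumption (u64_example stands in for the kernel's u64;
 * the real __BIG_ENDIAN_BITFIELD macro comes from the kernel's byteorder
 * headers):
 */
typedef unsigned long long u64_example;

struct example_word {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64_example reserved_8_63 : 56;	/* declared first: fills bits 63..8 */
	u64_example opcode        :  8;	/* ends up on bits 7..0 */
#else
	u64_example opcode        :  8;	/* declared first: fills bits 7..0 */
	u64_example reserved_8_63 : 56;
#endif
};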
+#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */ + u64 rsvd_575_560 : 16; + u64 drop_octs : 48; +#else + u64 drop_octs : 48; + u64 rsvd_575_560 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ + u64 rsvd_639_624 : 16; + u64 drop_pkts : 48; +#else + u64 drop_pkts : 48; + u64 rsvd_639_624 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */ + u64 rsvd_703_688 : 16; + u64 re_pkts : 48; +#else + u64 re_pkts : 48; + u64 rsvd_703_688 : 16; +#endif + u64 rsvd_767_704; /* W11 */ + u64 rsvd_831_768; /* W12 */ + u64 rsvd_895_832; /* W13 */ + u64 rsvd_959_896; /* W14 */ + u64 rsvd_1023_960; /* W15 */ +}; + +/* NIX sqe sizes */ +enum nix_maxsqesz { + NIX_MAXSQESZ_W16 = 0x0, + NIX_MAXSQESZ_W8 = 0x1, +}; + +/* NIX SQB caching type */ +enum nix_stype { + NIX_STYPE_STF = 0x0, + NIX_STYPE_STT = 0x1, + NIX_STYPE_STP = 0x2, +}; + +/* NIX Send queue context structure */ +struct nix_sq_ctx_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 sqe_way_mask : 16; + u64 cq : 20; + u64 sdp_mcast : 1; + u64 substream : 20; + u64 qint_idx : 6; + u64 ena : 1; +#else + u64 ena : 1; + u64 qint_idx : 6; + u64 substream : 20; + u64 sdp_mcast : 1; + u64 cq : 20; + u64 sqe_way_mask : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 sqb_count : 16; + u64 default_chan : 12; + u64 smq_rr_quantum : 24; + u64 sso_ena : 1; + u64 xoff : 1; + u64 cq_ena : 1; + u64 smq : 9; +#else + u64 smq : 9; + u64 cq_ena : 1; + u64 xoff : 1; + u64 sso_ena : 1; + u64 smq_rr_quantum : 24; + u64 default_chan : 12; + u64 sqb_count : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 rsvd_191 : 1; + u64 sqe_stype : 2; + u64 sq_int_ena : 8; + u64 sq_int : 8; + u64 sqb_aura : 20; + u64 smq_rr_count : 25; +#else + u64 smq_rr_count : 25; + u64 sqb_aura : 20; + u64 sq_int : 8; + u64 sq_int_ena : 8; + u64 sqe_stype : 2; + u64 rsvd_191 : 1; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 rsvd_255_253 : 3; + u64 smq_next_sq_vld : 1; + u64 smq_pend : 1; + u64 smenq_next_sqb_vld : 1; + u64 head_offset : 6; + u64 smenq_offset : 6; + u64 tail_offset : 6; + u64 smq_lso_segnum : 8; + u64 smq_next_sq : 20; + u64 mnq_dis : 1; + u64 lmt_dis : 1; + u64 cq_limit : 8; + u64 max_sqe_size : 2; +#else + u64 max_sqe_size : 2; + u64 cq_limit : 8; + u64 lmt_dis : 1; + u64 mnq_dis : 1; + u64 smq_next_sq : 20; + u64 smq_lso_segnum : 8; + u64 tail_offset : 6; + u64 smenq_offset : 6; + u64 head_offset : 6; + u64 smenq_next_sqb_vld : 1; + u64 smq_pend : 1; + u64 smq_next_sq_vld : 1; + u64 rsvd_255_253 : 3; +#endif + u64 next_sqb : 64;/* W4 */ + u64 tail_sqb : 64;/* W5 */ + u64 smenq_sqb : 64;/* W6 */ + u64 smenq_next_sqb : 64;/* W7 */ + u64 head_sqb : 64;/* W8 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ + u64 rsvd_639_630 : 10; + u64 vfi_lso_vld : 1; + u64 vfi_lso_vlan1_ins_ena : 1; + u64 vfi_lso_vlan0_ins_ena : 1; + u64 vfi_lso_mps : 14; + u64 vfi_lso_sb : 8; + u64 vfi_lso_sizem1 : 3; + u64 vfi_lso_total : 18; + u64 rsvd_583_576 : 8; +#else + u64 rsvd_583_576 : 8; + u64 vfi_lso_total : 18; + u64 vfi_lso_sizem1 : 3; + u64 vfi_lso_sb : 8; + u64 vfi_lso_mps : 14; + u64 vfi_lso_vlan0_ins_ena : 1; + u64 vfi_lso_vlan1_ins_ena : 1; + u64 vfi_lso_vld : 1; + u64 rsvd_639_630 : 10; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */ + u64 rsvd_703_658 : 46; + u64 scm_lso_rem : 18; +#else + u64 scm_lso_rem : 18; + u64 rsvd_703_658 : 46; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */ + u64 rsvd_767_752 : 16; + u64 octs : 48; +#else + u64 octs : 48; + u64 rsvd_767_752 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */ + 
u64 rsvd_831_816 : 16; + u64 pkts : 48; +#else + u64 pkts : 48; + u64 rsvd_831_816 : 16; +#endif + u64 rsvd_895_832 : 64;/* W13 */ +#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */ + u64 rsvd_959_944 : 16; + u64 dropped_octs : 48; +#else + u64 dropped_octs : 48; + u64 rsvd_959_944 : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */ + u64 rsvd_1023_1008 : 16; + u64 dropped_pkts : 48; +#else + u64 dropped_pkts : 48; + u64 rsvd_1023_1008 : 16; +#endif +}; + +/* NIX Receive side scaling entry structure*/ +struct nix_rsse_s { +#if defined(__BIG_ENDIAN_BITFIELD) + uint32_t reserved_20_31 : 12; + uint32_t rq : 20; +#else + uint32_t rq : 20; + uint32_t reserved_20_31 : 12; + +#endif +}; + +/* NIX receive multicast/mirror entry structure */ +struct nix_rx_mce_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + uint64_t next : 16; + uint64_t pf_func : 16; + uint64_t rsvd_31_24 : 8; + uint64_t index : 20; + uint64_t eol : 1; + uint64_t rsvd_2 : 1; + uint64_t op : 2; +#else + uint64_t op : 2; + uint64_t rsvd_2 : 1; + uint64_t eol : 1; + uint64_t index : 20; + uint64_t rsvd_31_24 : 8; + uint64_t pf_func : 16; + uint64_t next : 16; +#endif +}; + +enum nix_lsoalg { + NIX_LSOALG_NOP, + NIX_LSOALG_ADD_SEGNUM, + NIX_LSOALG_ADD_PAYLEN, + NIX_LSOALG_ADD_OFFSET, + NIX_LSOALG_TCP_FLAGS, +}; + +enum nix_txlayer { + NIX_TXLAYER_OL3, + NIX_TXLAYER_OL4, + NIX_TXLAYER_IL3, + NIX_TXLAYER_IL4, +}; + +struct nix_lso_format { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_19_63 : 45; + u64 alg : 3; + u64 rsvd_14_15 : 2; + u64 sizem1 : 2; + u64 rsvd_10_11 : 2; + u64 layer : 2; + u64 offset : 8; +#else + u64 offset : 8; + u64 layer : 2; + u64 rsvd_10_11 : 2; + u64 sizem1 : 2; + u64 rsvd_14_15 : 2; + u64 alg : 3; + u64 rsvd_19_63 : 45; +#endif +}; + +struct nix_rx_flowkey_alg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_35_63 :29; + u64 ltype_match :4; + u64 ltype_mask :4; + u64 sel_chan :1; + u64 ena :1; + u64 reserved_24_24 :1; + u64 lid :3; + u64 bytesm1 :5; + u64 hdr_offset :8; + u64 fn_mask :1; + u64 ln_mask :1; + u64 key_offset :6; +#else + u64 key_offset :6; + u64 ln_mask :1; + u64 fn_mask :1; + u64 hdr_offset :8; + u64 bytesm1 :5; + u64 lid :3; + u64 reserved_24_24 :1; + u64 ena :1; + u64 sel_chan :1; + u64 ltype_mask :4; + u64 ltype_match :4; + u64 reserved_35_63 :29; +#endif +}; + +/* NIX VTAG size */ +enum nix_vtag_size { + VTAGSIZE_T4 = 0x0, + VTAGSIZE_T8 = 0x1, +}; +#endif /* RVU_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3a9730612a70..0bd4351b2a49 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -988,8 +988,8 @@ static int pxa168_init_phy(struct net_device *dev) cmd.base.phy_address = pep->phy_addr; cmd.base.speed = pep->phy_speed; cmd.base.duplex = pep->phy_duplex; - ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising, - PHY_BASIC_FEATURES); + bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES, + __ETHTOOL_LINK_MODE_MASK_NBITS); cmd.base.autoneg = AUTONEG_ENABLE; if (cmd.base.speed != 0) @@ -1260,7 +1260,8 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) return work_done; } -static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c 
b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6e6abdc399de..7dbfdac4067a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -243,11 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev) if (dev->phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - if (dev->phydev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (dev->phydev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; - + lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) @@ -355,12 +351,8 @@ static int mtk_phy_connect(struct net_device *dev) dev->phydev->speed = 0; dev->phydev->duplex = 0; - if (of_phy_is_fixed_link(mac->of_node)) - dev->phydev->supported |= - SUPPORTED_Pause | SUPPORTED_Asym_Pause; - - dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause; + phy_set_max_speed(dev->phydev, SPEED_1000); + phy_support_asym_pause(dev->phydev); dev->phydev->advertising = dev->phydev->supported | ADVERTISED_Autoneg; phy_start_aneg(dev->phydev); @@ -405,7 +397,7 @@ static int mtk_mdio_init(struct mtk_eth *eth) eth->mii_bus->priv = eth; eth->mii_bus->parent = eth->dev; - snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); + snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); ret = of_mdiobus_register(eth->mii_bus, mii_np); err_put_node: diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 4bdf25059542..deef5a998985 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -614,7 +614,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, int i; buf->direct.buf = NULL; - buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE); buf->npages = buf->nbufs; buf->page_shift = PAGE_SHIFT; buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index d25e16d2c319..109472d6b61f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -167,8 +167,13 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].rx_ppp = pfcrx; params->prof[i].tx_pause = !(pfcrx || pfctx); params->prof[i].tx_ppp = pfctx; - params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; - params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; + if (mlx4_low_memory_profile()) { + params->prof[i].tx_ring_size = MLX4_EN_MIN_TX_SIZE; + params->prof[i].rx_ring_size = MLX4_EN_MIN_RX_SIZE; + } else { + params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; + params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; + } params->prof[i].num_up = MLX4_EN_NUM_UP_LOW; params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up; params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up * diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 7262c6310650..4b4351141b94 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -406,7 +406,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; if (WARN_ON(!obj_per_chunk)) return -EINVAL; - num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; + num_icm = 
DIV_ROUND_UP(nobj, obj_per_chunk); table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); if (!table->icm) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d2d59444f562..6a046030e873 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = { NULL, NULL, NULL), }; -static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id, - union devlink_param_value init_val) -{ - struct mlx4_priv *priv = devlink_priv(devlink); - struct mlx4_dev *dev = &priv->dev; - int err; - - err = devlink_param_driverinit_value_set(devlink, param_id, init_val); - if (err) - mlx4_warn(dev, - "devlink set parameter %u value failed (err = %d)", - param_id, err); -} - static void mlx4_devlink_set_params_init_values(struct devlink *devlink) { union devlink_param_value value; value.vbool = !!mlx4_internal_err_reset; - mlx4_devlink_set_init_value(devlink, - DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, - value); + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + value); value.vu32 = 1UL << log_num_mac; - mlx4_devlink_set_init_value(devlink, - DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value); + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + value); value.vbool = enable_64b_cqe_eqe; - mlx4_devlink_set_init_value(devlink, - MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, - value); + devlink_param_driverinit_value_set(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + value); value.vbool = enable_4k_uar; - mlx4_devlink_set_init_value(devlink, - MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, - value); + devlink_param_driverinit_value_set(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + value); value.vbool = false; - mlx4_devlink_set_init_value(devlink, - DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, - value); + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + value); } static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c3228b89df46..485d856546c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -72,7 +72,7 @@ #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) #define DEF_RX_RINGS 16 #define MAX_RX_RINGS 128 -#define MIN_RX_RINGS 4 +#define MIN_RX_RINGS 1 #define LOG_TXBB_SIZE 6 #define TXBB_SIZE BIT(LOG_TXBB_SIZE) #define HEADROOM (2048 / TXBB_SIZE + 1) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index a53736c26c0c..a5a0823e5ada 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -308,10 +308,11 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_FLOW_TABLE: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: - case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: + case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT: case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_FPGA_DESTROY_QP: case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: + case MLX5_CMD_OP_DEALLOC_MEMIC: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -426,7 +427,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case 
MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_COUNTER: - case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: + case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT: case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_FPGA_CREATE_QP: case MLX5_CMD_OP_FPGA_MODIFY_QP: @@ -435,6 +436,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: + case MLX5_CMD_OP_ALLOC_MEMIC: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -599,8 +601,8 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); - MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER); - MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER); + MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT); + MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT); MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP); @@ -617,6 +619,8 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT); MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT); MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT); + MLX5_COMMAND_STR_CASE(ALLOC_MEMIC); + MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC); default: return "unknown command opcode"; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index a4179122a279..4b85abb5c9f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -109,6 +109,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq->cons_index = 0; cq->arm_sn = 0; cq->eq = eq; + cq->uid = MLX5_GET(create_cq_in, in, uid); refcount_set(&cq->refcount, 1); init_completion(&cq->free); if (!cq->comp) @@ -144,6 +145,7 @@ err_cmd: memset(dout, 0, sizeof(dout)); MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ); MLX5_SET(destroy_cq_in, din, cqn, cq->cqn); + MLX5_SET(destroy_cq_in, din, uid, cq->uid); mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); return err; } @@ -165,6 +167,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); + MLX5_SET(destroy_cq_in, in, uid, cq->uid); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; @@ -196,6 +199,7 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0}; MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ); + MLX5_SET(modify_cq_in, in, uid, cq->uid); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_modify_cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index 0240aee9189e..d027ce00c8ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -133,7 +133,7 @@ TRACE_EVENT(mlx5_fs_del_fg, {MLX5_FLOW_CONTEXT_ACTION_DROP, "DROP"},\ {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, "FWD"},\ {MLX5_FLOW_CONTEXT_ACTION_COUNT, "CNT"},\ - {MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\ + {MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT, "REFORMAT"},\ {MLX5_FLOW_CONTEXT_ACTION_DECAP, 
"DECAP"},\ {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\ {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\ @@ -252,10 +252,10 @@ TRACE_EVENT(mlx5_fs_add_rule, memcpy(__entry->destination, &rule->dest_attr, sizeof(__entry->destination)); - if (rule->dest_attr.type & MLX5_FLOW_DESTINATION_TYPE_COUNTER && - rule->dest_attr.counter) + if (rule->dest_attr.type & + MLX5_FLOW_DESTINATION_TYPE_COUNTER) __entry->counter_id = - rule->dest_attr.counter->id; + rule->dest_attr.counter_id; ), TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n", __entry->rule, __entry->fte, __entry->index, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0f189f873859..d7fbd5b6ac95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -173,6 +173,7 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) } } +/* Use this function to get max num channels (rxqs/txqs) only to create netdev */ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) { return is_kdump_kernel() ? @@ -181,6 +182,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) MLX5E_MAX_NUM_CHANNELS); } +/* Use this function to get max num channels after netdev was created */ +static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev) +{ + return min_t(unsigned int, netdev->num_rx_queues, + netdev->num_tx_queues); +} + struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; @@ -205,18 +213,12 @@ struct mlx5e_umr_wqe { extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; -static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { - "rx_cqe_moder", - "tx_cqe_moder", - "rx_cqe_compress", - "rx_striding_rq", -}; - enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1), MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2), MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3), + MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4), }; #define MLX5E_SET_PFLAG(params, pflag, enable) \ @@ -298,6 +300,7 @@ struct mlx5e_dcbx_dp { enum { MLX5E_RQ_STATE_ENABLED, MLX5E_RQ_STATE_AM, + MLX5E_RQ_STATE_NO_CSUM_COMPLETE, }; struct mlx5e_cq { @@ -678,7 +681,7 @@ struct mlx5e_priv { struct work_struct update_carrier_work; struct work_struct set_rx_mode_work; struct work_struct tx_timeout_work; - struct delayed_work update_stats_work; + struct work_struct update_stats_work; struct mlx5_core_dev *mdev; struct net_device *netdev; @@ -703,7 +706,7 @@ struct mlx5e_priv { }; struct mlx5e_profile { - void (*init)(struct mlx5_core_dev *mdev, + int (*init)(struct mlx5_core_dev *mdev, struct net_device *netdev, const struct mlx5e_profile *profile, void *ppriv); void (*cleanup)(struct mlx5e_priv *priv); @@ -715,7 +718,6 @@ struct mlx5e_profile { void (*disable)(struct mlx5e_priv *priv); void (*update_stats)(struct mlx5e_priv *priv); void (*update_carrier)(struct mlx5e_priv *priv); - int (*max_nch)(struct mlx5_core_dev *mdev); struct { mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; @@ -906,10 +908,16 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb); /* common netdev helpers */ +void mlx5e_create_q_counters(struct mlx5e_priv *priv); +void mlx5e_destroy_q_counters(struct mlx5e_priv *priv); +int mlx5e_open_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *drop_rq); +void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); + int 
mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); -int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv); -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); +int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); int mlx5e_create_direct_rqts(struct mlx5e_priv *priv); void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv); @@ -925,8 +933,8 @@ int mlx5e_create_tises(struct mlx5e_priv *priv); void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv); int mlx5e_close(struct net_device *netdev); int mlx5e_open(struct net_device *netdev); -void mlx5e_update_stats_work(struct work_struct *work); +void mlx5e_queue_update_stats(struct mlx5e_priv *priv); int mlx5e_bits_invert(unsigned long a, int size); typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv); @@ -953,21 +961,32 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal); int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal); +u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); +u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info); int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash); /* mlx5e generic netdev management API */ +int mlx5e_netdev_init(struct net_device *netdev, + struct mlx5e_priv *priv, + struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile, + void *ppriv); +void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv); struct net_device* mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, - void *ppriv); + int nch, void *ppriv); int mlx5e_attach_netdev(struct mlx5e_priv *priv); void mlx5e_detach_netdev(struct mlx5e_priv *priv); void mlx5e_destroy_netdev(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 max_channels, u16 mtu); +void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +void mlx5e_build_rss_params(struct mlx5e_params *params); u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 24e3b564964f..023dc4bccd28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -235,3 +235,211 @@ out: kfree(out); return err; } + +static u32 fec_supported_speeds[] = { + 10000, + 40000, + 25000, + 50000, + 56000, + 100000 +}; + +#define MLX5E_FEC_SUPPORTED_SPEEDS ARRAY_SIZE(fec_supported_speeds) + +/* get/set FEC admin field for a given speed */ +static int mlx5e_fec_admin_field(u32 *pplm, + u8 *fec_policy, + bool write, + u32 speed) +{ + switch (speed) { + case 10000: + case 40000: + if (!write) + *fec_policy = MLX5_GET(pplm_reg, pplm, + fec_override_cap_10g_40g); + else + MLX5_SET(pplm_reg, pplm, + fec_override_admin_10g_40g, *fec_policy); + break; + case 25000: + if (!write) + *fec_policy = MLX5_GET(pplm_reg, pplm, + fec_override_admin_25g); + else + MLX5_SET(pplm_reg, pplm, + fec_override_admin_25g, *fec_policy); + break; + case 50000: + if (!write) + *fec_policy = MLX5_GET(pplm_reg, pplm, + fec_override_admin_50g); + else + MLX5_SET(pplm_reg, pplm, + 
fec_override_admin_50g, *fec_policy); + break; + case 56000: + if (!write) + *fec_policy = MLX5_GET(pplm_reg, pplm, + fec_override_admin_56g); + else + MLX5_SET(pplm_reg, pplm, + fec_override_admin_56g, *fec_policy); + break; + case 100000: + if (!write) + *fec_policy = MLX5_GET(pplm_reg, pplm, + fec_override_admin_100g); + else + MLX5_SET(pplm_reg, pplm, + fec_override_admin_100g, *fec_policy); + break; + default: + return -EINVAL; + } + return 0; +} + +/* returns FEC capabilities for a given speed */ +static int mlx5e_get_fec_cap_field(u32 *pplm, + u8 *fec_cap, + u32 speed) +{ + switch (speed) { + case 10000: + case 40000: + *fec_cap = MLX5_GET(pplm_reg, pplm, + fec_override_admin_10g_40g); + break; + case 25000: + *fec_cap = MLX5_GET(pplm_reg, pplm, + fec_override_cap_25g); + break; + case 50000: + *fec_cap = MLX5_GET(pplm_reg, pplm, + fec_override_cap_50g); + break; + case 56000: + *fec_cap = MLX5_GET(pplm_reg, pplm, + fec_override_cap_56g); + break; + case 100000: + *fec_cap = MLX5_GET(pplm_reg, pplm, + fec_override_cap_100g); + break; + default: + return -EINVAL; + } + return 0; +} + +int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps) +{ + u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(pplm_reg); + u32 current_fec_speed; + int err; + + if (!MLX5_CAP_GEN(dev, pcam_reg)) + return -EOPNOTSUPP; + + if (!MLX5_CAP_PCAM_REG(dev, pplm)) + return -EOPNOTSUPP; + + MLX5_SET(pplm_reg, in, local_port, 1); + err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0); + if (err) + return err; + + err = mlx5e_port_linkspeed(dev, ¤t_fec_speed); + if (err) + return err; + + return mlx5e_get_fec_cap_field(out, fec_caps, current_fec_speed); +} + +int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active, + u8 *fec_configured_mode) +{ + u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(pplm_reg); + u32 link_speed; + int err; + + if (!MLX5_CAP_GEN(dev, pcam_reg)) + return -EOPNOTSUPP; + + if (!MLX5_CAP_PCAM_REG(dev, pplm)) + return -EOPNOTSUPP; + + MLX5_SET(pplm_reg, in, local_port, 1); + err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0); + if (err) + return err; + + *fec_mode_active = MLX5_GET(pplm_reg, out, fec_mode_active); + + if (!fec_configured_mode) + return 0; + + err = mlx5e_port_linkspeed(dev, &link_speed); + if (err) + return err; + + return mlx5e_fec_admin_field(out, fec_configured_mode, 0, link_speed); +} + +int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy) +{ + bool fec_mode_not_supp_in_speed = false; + u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC); + u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(pplm_reg); + u32 current_fec_speed; + u8 fec_caps = 0; + int err; + int i; + + if (!MLX5_CAP_GEN(dev, pcam_reg)) + return -EOPNOTSUPP; + + if (!MLX5_CAP_PCAM_REG(dev, pplm)) + return -EOPNOTSUPP; + + MLX5_SET(pplm_reg, in, local_port, 1); + err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0); + if (err) + return err; + + err = mlx5e_port_linkspeed(dev, ¤t_fec_speed); + if (err) + return err; + + memset(in, 0, sz); + MLX5_SET(pplm_reg, in, local_port, 1); + for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) { + mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]); + /* policy supported for link speed */ + if (!!(fec_caps & fec_policy)) { + mlx5e_fec_admin_field(in, &fec_policy, 1, + fec_supported_speeds[i]); + } else 
{ + if (fec_supported_speeds[i] == current_fec_speed) + return -EOPNOTSUPP; + mlx5e_fec_admin_field(in, &no_fec_policy, 1, + fec_supported_speeds[i]); + fec_mode_not_supp_in_speed = true; + } + } + + if (fec_mode_not_supp_in_speed) + mlx5_core_dbg(dev, + "FEC policy 0x%x is not supported for some speeds", + fec_policy); + + return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index f8cbd8194179..cd2160b8c9bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -45,4 +45,16 @@ int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); + +int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps); +int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active, + u8 *fec_configured_mode); +int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy); + +enum { + MLX5E_FEC_NOFEC, + MLX5E_FEC_FIRECODE, + MLX5E_FEC_RS_528_514, +}; + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 45cdde694d20..8657e0f26995 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -543,8 +543,11 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); - netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n", - __func__, arfs_rule->filter_id, arfs_rule->rxq, err); + priv->channel_stats[arfs_rule->rxq].rq.arfs_err++; + mlx5e_dbg(HW, priv, + "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n", + __func__, arfs_rule->filter_id, arfs_rule->rxq, + tuple->ip_proto, err); } out: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index db3278cc052b..3078491cc0d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -153,7 +153,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) if (enable_uc_lb) MLX5_SET(modify_tir_in, in, ctx.self_lb_block, - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_); + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST); MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 98dd3e0ada72..3e770abfd802 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -135,6 +135,14 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); } +static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { + "rx_cqe_moder", + "tx_cqe_moder", + "rx_cqe_compress", + "rx_striding_rq", + "rx_no_csum_complete", +}; + int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) { int i, num_stats = 0; @@ -311,7 +319,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv, struct ethtool_channels *ch) { - ch->max_combined = priv->profile->max_nch(priv->mdev); + 
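/* (Illustrative aside, not part of the patch.)  mlx5e_set_fecparam(), added
 * further below in en_ethtool.c, converts the ethtool FEC request into the
 * one-hot PPLM policy that mlx5e_set_fec_mode() (added above in en/port.c)
 * then programs per supported speed, falling back to no-FEC on speeds that
 * cannot honour the request.  A user-space sketch of that translation, with
 * the lookup table copied here for the example (ETHTOOL_FEC_* come from the
 * uapi <linux/ethtool.h>):
 */
#include <linux/ethtool.h>

static unsigned char example_ethtool_fec_to_pplm_policy(unsigned int ethtool_fec)
{
	static const unsigned int mode_to_ethtool[] = {
		ETHTOOL_FEC_OFF,	/* MLX5E_FEC_NOFEC      (bit 0) */
		ETHTOOL_FEC_BASER,	/* MLX5E_FEC_FIRECODE   (bit 1) */
		ETHTOOL_FEC_RS,		/* MLX5E_FEC_RS_528_514 (bit 2) */
	};
	unsigned int mode;

	/* First mode whose ethtool encoding matches the request wins,
	 * mirroring the loop in mlx5e_set_fecparam().
	 */
	for (mode = 0; mode < 3; mode++)
		if (mode_to_ethtool[mode] & ethtool_fec)
			return 1U << mode;	/* e.g. ETHTOOL_FEC_RS -> 0x4 */
	return 0;				/* no recognised FEC mode requested */
}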
ch->max_combined = mlx5e_get_netdev_max_channels(priv->netdev); ch->combined_count = priv->channels.params.num_channels; } @@ -539,6 +547,70 @@ static void ptys2ethtool_adver_link(unsigned long *advertising_modes, __ETHTOOL_LINK_MODE_MASK_NBITS); } +static const u32 pplm_fec_2_ethtool[] = { + [MLX5E_FEC_NOFEC] = ETHTOOL_FEC_OFF, + [MLX5E_FEC_FIRECODE] = ETHTOOL_FEC_BASER, + [MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS, +}; + +static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size) +{ + int mode = 0; + + if (!fec_mode) + return ETHTOOL_FEC_AUTO; + + mode = find_first_bit(&fec_mode, size); + + if (mode < ARRAY_SIZE(pplm_fec_2_ethtool)) + return pplm_fec_2_ethtool[mode]; + + return 0; +} + +/* we use ETHTOOL_FEC_* offset and apply it to ETHTOOL_LINK_MODE_FEC_*_BIT */ +static u32 ethtool_fec2ethtool_caps(u_long ethtool_fec_code) +{ + u32 offset; + + offset = find_first_bit(ðtool_fec_code, sizeof(u32)); + offset -= ETHTOOL_FEC_OFF_BIT; + offset += ETHTOOL_LINK_MODE_FEC_NONE_BIT; + + return offset; +} + +static int get_fec_supported_advertised(struct mlx5_core_dev *dev, + struct ethtool_link_ksettings *link_ksettings) +{ + u_long fec_caps = 0; + u32 active_fec = 0; + u32 offset; + u32 bitn; + int err; + + err = mlx5e_get_fec_caps(dev, (u8 *)&fec_caps); + if (err) + return (err == -EOPNOTSUPP) ? 0 : err; + + err = mlx5e_get_fec_mode(dev, &active_fec, NULL); + if (err) + return err; + + for_each_set_bit(bitn, &fec_caps, ARRAY_SIZE(pplm_fec_2_ethtool)) { + u_long ethtool_bitmask = pplm_fec_2_ethtool[bitn]; + + offset = ethtool_fec2ethtool_caps(ethtool_bitmask); + __set_bit(offset, link_ksettings->link_modes.supported); + } + + active_fec = pplm2ethtool_fec(active_fec, sizeof(u32) * BITS_PER_BYTE); + offset = ethtool_fec2ethtool_caps(active_fec); + __set_bit(offset, link_ksettings->link_modes.advertising); + + return 0; +} + static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings, u32 eth_proto_cap, u8 connector_type) @@ -734,7 +806,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, if (err) { netdev_err(netdev, "%s: query port ptys failed: %d\n", __func__, err); - goto err_query_ptys; + goto err_query_regs; } eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); @@ -770,11 +842,17 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, AUTONEG_ENABLE; ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Autoneg); + + err = get_fec_supported_advertised(mdev, link_ksettings); + if (err) + netdev_dbg(netdev, "%s: FEC caps query failed: %d\n", + __func__, err); + if (!an_disable_admin) ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Autoneg); -err_query_ptys: +err_query_regs: return err; } @@ -852,18 +930,30 @@ out: return err; } +u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv) +{ + return sizeof(priv->channels.params.toeplitz_hash_key); +} + static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - return sizeof(priv->channels.params.toeplitz_hash_key); + return mlx5e_ethtool_get_rxfh_key_size(priv); } -static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) +u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv) { return MLX5E_INDIR_RQT_SIZE; } +static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_get_rxfh_indir_size(priv); +} + static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { @@ 
-1257,6 +1347,58 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return mlx5_set_port_wol(mdev, mlx5_wol_mode); } +static int mlx5e_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 fec_configured = 0; + u32 fec_active = 0; + int err; + + err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured); + + if (err) + return err; + + fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active, + sizeof(u32) * BITS_PER_BYTE); + + if (!fecparam->active_fec) + return -EOPNOTSUPP; + + fecparam->fec = pplm2ethtool_fec((u_long)fec_configured, + sizeof(u8) * BITS_PER_BYTE); + + return 0; +} + +static int mlx5e_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 fec_policy = 0; + int mode; + int err; + + for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) { + if (!(pplm_fec_2_ethtool[mode] & fecparam->fec)) + continue; + fec_policy |= (1 << mode); + break; + } + + err = mlx5e_set_fec_mode(mdev, fec_policy); + + if (err) + return err; + + mlx5_toggle_port_link(mdev); + + return 0; +} + static u32 mlx5e_get_msglevel(struct net_device *dev) { return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel; @@ -1512,6 +1654,27 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return 0; } +static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_channels *channels = &priv->channels; + struct mlx5e_channel *c; + int i; + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return 0; + + for (i = 0; i < channels->num; i++) { + c = channels->c[i]; + if (enable) + __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + else + __clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + } + + return 0; +} + static int mlx5e_handle_pflag(struct net_device *netdev, u32 wanted_flags, enum mlx5e_priv_flag flag, @@ -1563,6 +1726,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) err = mlx5e_handle_pflag(netdev, pflags, MLX5E_PFLAG_RX_STRIDING_RQ, set_pflag_rx_striding_rq); + if (err) + goto out; + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, + set_pflag_rx_no_csum_complete); out: mutex_unlock(&priv->state_lock); @@ -1652,4 +1821,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .self_test = mlx5e_self_test, .get_msglevel = mlx5e_get_msglevel, .set_msglevel = mlx5e_set_msglevel, + .get_fecparam = mlx5e_get_fecparam, + .set_fecparam = mlx5e_set_fecparam, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 41cde926cdab..c18dcebe1462 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -131,14 +131,14 @@ set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m, if (ip4src_m) { memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4), &ip4src_v, sizeof(ip4src_v)); - memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4), - 0xff, sizeof(ip4src_m)); + memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4), + &ip4src_m, sizeof(ip4src_m)); } if (ip4dst_m) { memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), &ip4dst_v, 
sizeof(ip4dst_v)); - memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - 0xff, sizeof(ip4dst_m)); + memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &ip4dst_m, sizeof(ip4dst_m)); } MLX5E_FTE_SET(headers_c, ethertype, 0xffff); @@ -173,11 +173,11 @@ set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v, __be16 pdst_m, __be16 pdst_v) { if (psrc_m) { - MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff); + MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m)); MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v)); } if (pdst_m) { - MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff); + MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m)); MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v)); } @@ -190,12 +190,12 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v, __be16 pdst_m, __be16 pdst_v) { if (psrc_m) { - MLX5E_FTE_SET(headers_c, udp_sport, 0xffff); + MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m)); MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v)); } if (pdst_m) { - MLX5E_FTE_SET(headers_c, udp_dport, 0xffff); + MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m)); MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v)); } @@ -508,26 +508,14 @@ static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs) if (l4_mask->tos) return -EINVAL; - if (l4_mask->ip4src) { - if (!all_ones(l4_mask->ip4src)) - return -EINVAL; + if (l4_mask->ip4src) ntuples++; - } - if (l4_mask->ip4dst) { - if (!all_ones(l4_mask->ip4dst)) - return -EINVAL; + if (l4_mask->ip4dst) ntuples++; - } - if (l4_mask->psrc) { - if (!all_ones(l4_mask->psrc)) - return -EINVAL; + if (l4_mask->psrc) ntuples++; - } - if (l4_mask->pdst) { - if (!all_ones(l4_mask->pdst)) - return -EINVAL; + if (l4_mask->pdst) ntuples++; - } /* Flow is TCP/UDP */ return ++ntuples; } @@ -540,16 +528,10 @@ static int validate_ip4(struct ethtool_rx_flow_spec *fs) if (l3_mask->l4_4_bytes || l3_mask->tos || fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) return -EINVAL; - if (l3_mask->ip4src) { - if (!all_ones(l3_mask->ip4src)) - return -EINVAL; + if (l3_mask->ip4src) ntuples++; - } - if (l3_mask->ip4dst) { - if (!all_ones(l3_mask->ip4dst)) - return -EINVAL; + if (l3_mask->ip4dst) ntuples++; - } if (l3_mask->proto) ntuples++; /* Flow is IPv4 */ @@ -588,16 +570,10 @@ static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs) if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst)) ntuples++; - if (l4_mask->psrc) { - if (!all_ones(l4_mask->psrc)) - return -EINVAL; + if (l4_mask->psrc) ntuples++; - } - if (l4_mask->pdst) { - if (!all_ones(l4_mask->pdst)) - return -EINVAL; + if (l4_mask->pdst) ntuples++; - } /* Flow is TCP/UDP */ return ++ntuples; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f291d1bf1558..1243edbedc9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -272,10 +272,9 @@ static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv) mlx5e_stats_grps[i].update_stats(priv); } -void mlx5e_update_stats_work(struct work_struct *work) +static void mlx5e_update_stats_work(struct work_struct *work) { - struct delayed_work *dwork = to_delayed_work(work); - struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, update_stats_work); mutex_lock(&priv->state_lock); @@ -283,6 +282,17 @@ void mlx5e_update_stats_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } +void 
mlx5e_queue_update_stats(struct mlx5e_priv *priv) +{ + if (!priv->profile->update_stats) + return; + + if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state))) + return; + + queue_work(priv->wq, &priv->update_stats_work); +} + static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, enum mlx5_dev_event event, unsigned long param) { @@ -929,6 +939,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (params->rx_dim_enabled) __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) + __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + return 0; err_destroy_rq: @@ -1786,7 +1799,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_channel_param *cparam) { struct mlx5e_priv *priv = c->priv; - int err, tc, max_nch = priv->profile->max_nch(priv->mdev); + int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev); for (tc = 0; tc < params->num_tc; tc++) { int txq_ix = c->ix + tc * max_nch; @@ -2426,7 +2439,7 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv) int err; int ix; - for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { + for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) { rqt = &priv->direct_tir[ix].rqt; err = mlx5e_create_rqt(priv, 1 /*size */, rqt); if (err) @@ -2447,7 +2460,7 @@ void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv) { int i; - for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) + for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); } @@ -2541,7 +2554,7 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv, mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); } - for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { + for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) { struct mlx5e_redirect_rqt_param direct_rrp = { .is_rss = false, { @@ -2742,7 +2755,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) goto free_in; } - for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { + for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) { err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in, inlen); if (err) @@ -2842,7 +2855,7 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) { - int max_nch = priv->profile->max_nch(priv->mdev); + int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); int i, tc; for (i = 0; i < max_nch; i++) @@ -2954,9 +2967,7 @@ int mlx5e_open_locked(struct net_device *netdev) if (priv->profile->update_carrier) priv->profile->update_carrier(priv); - if (priv->profile->update_stats) - queue_delayed_work(priv->wq, &priv->update_stats_work, 0); - + mlx5e_queue_update_stats(priv); return 0; err_clear_state_opened_flag: @@ -3049,8 +3060,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, return mlx5e_alloc_cq_common(mdev, param, cq); } -static int mlx5e_open_drop_rq(struct mlx5e_priv *priv, - struct mlx5e_rq *drop_rq) +int mlx5e_open_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *drop_rq) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_cq_param cq_param = {}; @@ -3094,7 +3105,7 @@ err_free_cq: return err; } -static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) +void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) { mlx5e_destroy_rq(drop_rq); mlx5e_free_rq(drop_rq); @@ -3175,7 +3186,7 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *t MLX5_SET(tirc, 
tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); } -int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) +int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) { struct mlx5e_tir *tir; void *tirc; @@ -3202,7 +3213,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) } } - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) goto out; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { @@ -3236,7 +3247,7 @@ err_destroy_inner_tirs: int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) { - int nch = priv->profile->max_nch(priv->mdev); + int nch = mlx5e_get_netdev_max_channels(priv->netdev); struct mlx5e_tir *tir; void *tirc; int inlen; @@ -3273,14 +3284,14 @@ err_destroy_ch_tirs: return err; } -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) { int i; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) return; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) @@ -3289,7 +3300,7 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv) { - int nch = priv->profile->max_nch(priv->mdev); + int nch = mlx5e_get_netdev_max_channels(priv->netdev); int i; for (i = 0; i < nch; i++) @@ -3381,9 +3392,6 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct mlx5e_priv *priv = cb_priv; - if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) - return -EOPNOTSUPP; - switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); @@ -3438,7 +3446,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_pport_stats *pstats = &priv->stats.pport; /* update HW stats in background for next time */ - queue_delayed_work(priv->wq, &priv->update_stats_work, 0); + mlx5e_queue_update_stats(priv); if (mlx5e_is_uplink_rep(priv)) { stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); @@ -4480,6 +4488,31 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); } +void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + /* Prefer Striding RQ, unless any of the following holds: + * - Striding RQ configuration is not possible/supported. + * - Slow PCI heuristic. + * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. 
+ */ + if (!slow_pci_heuristic(mdev) && + mlx5e_striding_rq_possible(mdev, params) && + (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) || + !mlx5e_rx_is_linear_skb(mdev, params))) + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); + mlx5e_set_rq_type(mdev, params); + mlx5e_init_rq_type_params(mdev, params); +} + +void mlx5e_build_rss_params(struct mlx5e_params *params) +{ + params->rss_hfunc = ETH_RSS_HASH_XOR; + netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key)); + mlx5e_build_default_indir_rqt(params->indirection_rqt, + MLX5E_INDIR_RQT_SIZE, params->num_channels); +} + void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 max_channels, u16 mtu) @@ -4503,20 +4536,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, params->rx_cqe_compress_def = slow_pci_heuristic(mdev); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false); /* RQ */ - /* Prefer Striding RQ, unless any of the following holds: - * - Striding RQ configuration is not possible/supported. - * - Slow PCI heuristic. - * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. - */ - if (!slow_pci_heuristic(mdev) && - mlx5e_striding_rq_possible(mdev, params) && - (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) || - !mlx5e_rx_is_linear_skb(mdev, params))) - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); - mlx5e_set_rq_type(mdev, params); - mlx5e_init_rq_type_params(mdev, params); + mlx5e_build_rq_params(mdev, params); /* HW LRO */ @@ -4539,37 +4562,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); /* RSS */ - params->rss_hfunc = ETH_RSS_HASH_XOR; - netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key)); - mlx5e_build_default_indir_rqt(params->indirection_rqt, - MLX5E_INDIR_RQT_SIZE, max_channels); -} - -static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - priv->mdev = mdev; - priv->netdev = netdev; - priv->profile = profile; - priv->ppriv = ppriv; - priv->msglevel = MLX5E_MSG_LEVEL; - priv->max_opened_tc = 1; - - mlx5e_build_nic_params(mdev, &priv->channels.params, - profile->max_nch(mdev), netdev->mtu); - - mutex_init(&priv->state_lock); - - INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); - INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); - INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); - INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); - - mlx5e_timestamp_init(priv); + mlx5e_build_rss_params(params); } static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) @@ -4707,7 +4700,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_tls_build_netdev(priv); } -static void mlx5e_create_q_counters(struct mlx5e_priv *priv) +void mlx5e_create_q_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@ -4725,7 +4718,7 @@ static void mlx5e_create_q_counters(struct mlx5e_priv *priv) } } -static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) +void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { if (priv->q_counter) mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); @@ -4734,15 +4727,23 @@ static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) 
mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter); } -static void mlx5e_nic_init(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) +static int mlx5e_nic_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; - mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv); + err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); + if (err) + return err; + + mlx5e_build_nic_params(mdev, &priv->channels.params, + mlx5e_get_netdev_max_channels(netdev), netdev->mtu); + + mlx5e_timestamp_init(priv); + err = mlx5e_ipsec_init(priv); if (err) mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); @@ -4751,12 +4752,15 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); mlx5e_build_tc2txq_maps(priv); + + return 0; } static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); + mlx5e_netdev_cleanup(priv->netdev, priv); } static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) @@ -4764,15 +4768,23 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; int err; + mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_destroy_q_counters; + } + err = mlx5e_create_indirect_rqt(priv); if (err) - return err; + goto err_close_drop_rq; err = mlx5e_create_direct_rqts(priv); if (err) goto err_destroy_indirect_rqts; - err = mlx5e_create_indirect_tirs(priv); + err = mlx5e_create_indirect_tirs(priv, true); if (err) goto err_destroy_direct_rqts; @@ -4797,11 +4809,15 @@ err_destroy_flow_steering: err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, true); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); return err; } @@ -4810,9 +4826,11 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) mlx5e_tc_nic_cleanup(priv); mlx5e_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); - mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, true); mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); } static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) @@ -4905,7 +4923,6 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .enable = mlx5e_nic_enable, .disable = mlx5e_nic_disable, .update_stats = mlx5e_update_ndo_stats, - .max_nch = mlx5e_get_max_num_channels, .update_carrier = mlx5e_update_carrier, .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe, .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, @@ -4914,13 +4931,53 @@ static const struct mlx5e_profile mlx5e_nic_profile = { /* mlx5e generic netdev management API (move to en_common.c) */ +/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */ +int mlx5e_netdev_init(struct net_device *netdev, + struct mlx5e_priv *priv, + struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile, + void *ppriv) +{ + /* priv 
init */ + priv->mdev = mdev; + priv->netdev = netdev; + priv->profile = profile; + priv->ppriv = ppriv; + priv->msglevel = MLX5E_MSG_LEVEL; + priv->max_opened_tc = 1; + + mutex_init(&priv->state_lock); + INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); + INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); + INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); + INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); + + priv->wq = create_singlethread_workqueue("mlx5e"); + if (!priv->wq) + return -ENOMEM; + + /* netdev init */ + netif_carrier_off(netdev); + +#ifdef CONFIG_MLX5_EN_ARFS + netdev->rx_cpu_rmap = mdev->rmap; +#endif + + return 0; +} + +void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv) +{ + destroy_workqueue(priv->wq); +} + struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, + int nch, void *ppriv) { - int nch = profile->max_nch(mdev); struct net_device *netdev; - struct mlx5e_priv *priv; + int err; netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch * profile->max_tc, @@ -4930,25 +4987,15 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, return NULL; } -#ifdef CONFIG_MLX5_EN_ARFS - netdev->rx_cpu_rmap = mdev->rmap; -#endif - - profile->init(mdev, netdev, profile, ppriv); - - netif_carrier_off(netdev); - - priv = netdev_priv(netdev); - - priv->wq = create_singlethread_workqueue("mlx5e"); - if (!priv->wq) - goto err_cleanup_nic; + err = profile->init(mdev, netdev, profile, ppriv); + if (err) { + mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err); + goto err_free_netdev; + } return netdev; -err_cleanup_nic: - if (profile->cleanup) - profile->cleanup(priv); +err_free_netdev: free_netdev(netdev); return NULL; @@ -4956,7 +5003,6 @@ err_cleanup_nic: int mlx5e_attach_netdev(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; const struct mlx5e_profile *profile; int err; @@ -4967,28 +5013,16 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) if (err) goto out; - mlx5e_create_q_counters(priv); - - err = mlx5e_open_drop_rq(priv, &priv->drop_rq); - if (err) { - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_destroy_q_counters; - } - err = profile->init_rx(priv); if (err) - goto err_close_drop_rq; + goto err_cleanup_tx; if (profile->enable) profile->enable(priv); return 0; -err_close_drop_rq: - mlx5e_close_drop_rq(&priv->drop_rq); - -err_destroy_q_counters: - mlx5e_destroy_q_counters(priv); +err_cleanup_tx: profile->cleanup_tx(priv); out: @@ -5006,10 +5040,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) flush_workqueue(priv->wq); profile->cleanup_rx(priv); - mlx5e_close_drop_rq(&priv->drop_rq); - mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); - cancel_delayed_work_sync(&priv->update_stats_work); + cancel_work_sync(&priv->update_stats_work); } void mlx5e_destroy_netdev(struct mlx5e_priv *priv) @@ -5017,7 +5049,6 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv) const struct mlx5e_profile *profile = priv->profile; struct net_device *netdev = priv->netdev; - destroy_workqueue(priv->wq); if (profile->cleanup) profile->cleanup(priv); free_netdev(netdev); @@ -5066,6 +5097,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) void *rpriv = NULL; void *priv; int err; + int nch; err = mlx5e_check_required_hca_cap(mdev); if (err) @@ -5081,7 +5113,8 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) } #endif - netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv); + nch = 
mlx5e_get_max_num_channels(mdev); + netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, rpriv); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); goto err_free_rpriv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index c9cc9747d21d..c3c657548824 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -46,8 +46,6 @@ #define MLX5E_REP_PARAMS_LOG_SQ_SIZE \ max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) -#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \ - max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE) static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; @@ -182,12 +180,108 @@ static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset) } } +static void mlx5e_rep_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + mlx5e_ethtool_get_ringparam(priv, param); +} + +static int mlx5e_rep_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + return mlx5e_ethtool_set_ringparam(priv, param); +} + +static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv, + struct mlx5_flow_destination *dest) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_flow_handle *flow_rule; + + flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, + rep->vport, + dest); + if (IS_ERR(flow_rule)) + return PTR_ERR(flow_rule); + + mlx5_del_flow_rules(rpriv->vport_rx_rule); + rpriv->vport_rx_rule = flow_rule; + return 0; +} + +static void mlx5e_rep_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + mlx5e_ethtool_get_channels(priv, ch); +} + +static int mlx5e_rep_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + u16 curr_channels_amount = priv->channels.params.num_channels; + u32 new_channels_amount = ch->combined_count; + struct mlx5_flow_destination new_dest; + int err = 0; + + err = mlx5e_ethtool_set_channels(priv, ch); + if (err) + return err; + + if (curr_channels_amount == 1 && new_channels_amount > 1) { + new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + new_dest.ft = priv->fs.ttc.ft.t; + } else if (new_channels_amount == 1 && curr_channels_amount > 1) { + new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + new_dest.tir_num = priv->direct_tir[0].tirn; + } else { + return 0; + } + + err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest); + if (err) { + netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n", + curr_channels_amount, new_channels_amount); + return err; + } + + return 0; +} + +static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_get_rxfh_key_size(priv); +} + +static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_get_rxfh_indir_size(priv); +} + static const struct ethtool_ops mlx5e_rep_ethtool_ops = { .get_drvinfo = mlx5e_rep_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = mlx5e_rep_get_strings, .get_sset_count = mlx5e_rep_get_sset_count, .get_ethtool_stats = mlx5e_rep_get_ethtool_stats, + .get_ringparam = 
mlx5e_rep_get_ringparam, + .set_ringparam = mlx5e_rep_set_ringparam, + .get_channels = mlx5e_rep_get_channels, + .set_channels = mlx5e_rep_set_channels, + .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size, + .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, }; int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) @@ -759,9 +853,6 @@ static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data, { struct mlx5e_priv *priv = cb_priv; - if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) - return -EOPNOTSUPP; - switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS); @@ -775,9 +866,6 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, { struct mlx5e_priv *priv = cb_priv; - if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) - return -EOPNOTSUPP; - switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); @@ -898,8 +986,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_priv *priv = netdev_priv(dev); /* update HW stats in background for next time */ - queue_delayed_work(priv->wq, &priv->update_stats_work, 0); - + mlx5e_queue_update_stats(priv); memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); } @@ -934,16 +1021,20 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->hard_mtu = MLX5E_ETH_HARD_MTU; params->sw_mtu = mtu; params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; - params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; - params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; + /* RQ */ + mlx5e_build_rq_params(mdev, params); + + /* CQ moderation params */ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); params->num_tc = 1; - params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); + + /* RSS */ + mlx5e_build_rss_params(params); } static void mlx5e_build_rep_netdev(struct net_device *netdev) @@ -963,6 +1054,16 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; netdev->hw_features |= NETIF_F_HW_TC; + netdev->hw_features |= NETIF_F_SG; + netdev->hw_features |= NETIF_F_IP_CSUM; + netdev->hw_features |= NETIF_F_IPV6_CSUM; + netdev->hw_features |= NETIF_F_GRO; + netdev->hw_features |= NETIF_F_TSO; + netdev->hw_features |= NETIF_F_TSO6; + netdev->hw_features |= NETIF_F_RXCSUM; + + netdev->features |= netdev->hw_features; + eth_hw_addr_random(netdev); netdev->min_mtu = ETH_MIN_MTU; @@ -970,63 +1071,127 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); } -static void mlx5e_init_rep(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) +static int mlx5e_init_rep(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); + int err; - priv->mdev = mdev; - priv->netdev = netdev; - priv->profile = profile; - priv->ppriv = ppriv; - - mutex_init(&priv->state_lock); + err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); + if (err) + return err; - INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); - priv->channels.params.num_channels = profile->max_nch(mdev); + 
priv->channels.params.num_channels = + mlx5e_get_netdev_max_channels(netdev); mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu); mlx5e_build_rep_netdev(netdev); mlx5e_timestamp_init(priv); + + return 0; } -static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) +static void mlx5e_cleanup_rep(struct mlx5e_priv *priv) +{ + mlx5e_netdev_cleanup(priv->netdev, priv); +} + +static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv) +{ + struct ttc_params ttc_params = {}; + int tt, err; + + priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); + + /* The inner_ttc in the ttc params is intentionally not set */ + ttc_params.any_tt_tirn = priv->direct_tir[0].tirn; + mlx5e_set_ttc_ft_params(&ttc_params); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; + + err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); + if (err) { + netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err); + return err; + } + return 0; +} + +static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_destination dest; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = priv->direct_tir[0].tirn; + flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, + rep->vport, + &dest); + if (IS_ERR(flow_rule)) + return PTR_ERR(flow_rule); + rpriv->vport_rx_rule = flow_rule; + return 0; +} + +static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; int err; mlx5e_init_l2_addr(priv); + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + return err; + } + + err = mlx5e_create_indirect_rqt(priv); + if (err) + goto err_close_drop_rq; + err = mlx5e_create_direct_rqts(priv); if (err) - return err; + goto err_destroy_indirect_rqts; - err = mlx5e_create_direct_tirs(priv); + err = mlx5e_create_indirect_tirs(priv, false); if (err) goto err_destroy_direct_rqts; - flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, - rep->vport, - priv->direct_tir[0].tirn); - if (IS_ERR(flow_rule)) { - err = PTR_ERR(flow_rule); + err = mlx5e_create_direct_tirs(priv); + if (err) + goto err_destroy_indirect_tirs; + + err = mlx5e_create_rep_ttc_table(priv); + if (err) goto err_destroy_direct_tirs; - } - rpriv->vport_rx_rule = flow_rule; + + err = mlx5e_create_rep_vport_rx_rule(priv); + if (err) + goto err_destroy_ttc_table; return 0; +err_destroy_ttc_table: + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); +err_destroy_indirect_tirs: + mlx5e_destroy_indirect_tirs(priv, false); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv); +err_destroy_indirect_rqts: + mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); return err; } @@ -1035,8 +1200,12 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; mlx5_del_flow_rules(rpriv->vport_rx_rule); + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); mlx5e_destroy_direct_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, false); mlx5e_destroy_direct_rqts(priv); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); } static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) @@ -1051,23 +1220,17 @@ static int 
mlx5e_init_rep_tx(struct mlx5e_priv *priv) return 0; } -static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev) -{ -#define MLX5E_PORT_REPRESENTOR_NCH 1 - return MLX5E_PORT_REPRESENTOR_NCH; -} - static const struct mlx5e_profile mlx5e_rep_profile = { .init = mlx5e_init_rep, + .cleanup = mlx5e_cleanup_rep, .init_rx = mlx5e_init_rep_rx, .cleanup_rx = mlx5e_cleanup_rep_rx, .init_tx = mlx5e_init_rep_tx, .cleanup_tx = mlx5e_cleanup_nic_tx, .update_stats = mlx5e_rep_update_hw_counters, - .max_nch = mlx5e_get_rep_max_num_channels, .update_carrier = NULL, .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, - .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */, + .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, .max_tc = 1, }; @@ -1127,13 +1290,14 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) struct mlx5e_rep_priv *rpriv; struct net_device *netdev; struct mlx5e_priv *upriv; - int err; + int nch, err; rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); if (!rpriv) return -ENOMEM; - netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv); + nch = mlx5e_get_max_num_channels(dev); + netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, nch, rpriv); if (!netdev) { pr_warn("Failed to create representor netdev for vport %d\n", rep->vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 15d8ae28c040..2f7fb8de6967 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -37,6 +37,7 @@ #include <net/busy_poll.h> #include <net/ip6_checksum.h> #include <net/page_pool.h> +#include <net/inet_ecn.h> #include "en.h" #include "en_tc.h" #include "eswitch.h" @@ -432,10 +433,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, struct mlx5_wq_cyc *wq, - u16 pi, u16 frag_pi) + u16 pi, u16 nnops) { struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi]; - u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; edge_wi = wi + nnops; @@ -454,15 +454,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); - u16 pi, frag_pi; + u16 pi, contig_wqebbs_room; int err; int i; pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); - frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); - - if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) { - mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi); + contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); + if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) { + mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room); pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); } @@ -690,12 +689,29 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); } -static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth) +static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, + __be16 *proto) { - __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto; + *proto = ((struct ethhdr *)skb->data)->h_proto; + *proto = __vlan_get_protocol(skb, *proto, network_depth); + return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); +} + +static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) +{ + int network_depth = 0; + __be16 proto; + void *ip; + int rc; + + if 
(unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto))) + return; - ethertype = __vlan_get_protocol(skb, ethertype, network_depth); - return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); + ip = skb->data + network_depth; + rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) : + IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip)); + + rq->stats->ecn_mark += !!rc; } static __be32 mlx5e_get_fcs(struct sk_buff *skb) @@ -737,6 +753,14 @@ static __be32 mlx5e_get_fcs(struct sk_buff *skb) return fcs_bytes; } +static u8 get_ip_proto(struct sk_buff *skb, __be16 proto) +{ + void *ip_p = skb->data + sizeof(struct ethhdr); + + return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol : + ((struct ipv6hdr *)ip_p)->nexthdr; +} + static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, @@ -745,6 +769,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, { struct mlx5e_rq_stats *stats = rq->stats; int network_depth = 0; + __be16 proto; if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) goto csum_none; @@ -755,7 +780,13 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (likely(is_last_ethertype_ip(skb, &network_depth))) { + if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) + goto csum_unnecessary; + + if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { + if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP)) + goto csum_unnecessary; + skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); if (network_depth > ETH_HLEN) @@ -773,8 +804,10 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } +csum_unnecessary: if (likely((cqe->hds_ip_ext & CQE_L3_OK) && - (cqe->hds_ip_ext & CQE_L4_OK))) { + ((cqe->hds_ip_ext & CQE_L4_OK) || + (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) { skb->ip_summed = CHECKSUM_UNNECESSARY; if (cqe_is_tunneled(cqe)) { skb->csum_level = 1; @@ -790,6 +823,8 @@ csum_none: stats->csum_none++; } +#define MLX5E_CE_BIT_MASK 0x80 + static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct mlx5e_rq *rq, @@ -834,6 +869,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); + /* checking CE bit in cqe - MSB in ml_path field */ + if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK)) + mlx5e_enable_ecn(rq, skb); + skb->protocol = eth_type_trans(skb, netdev); } @@ -1230,8 +1269,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - struct mlx5e_rq_stats *stats = rq->stats; struct hwtstamp_config *tstamp; + struct mlx5e_rq_stats *stats; struct net_device *netdev; struct mlx5e_priv *priv; char *pseudo_header; @@ -1254,6 +1293,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, priv = mlx5i_epriv(netdev); tstamp = &priv->tstamp; + stats = &priv->channel_stats[rq->ix].rq; g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 6839481f7697..1e55b9c27ffc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -53,6 +53,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, 
rx_lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, @@ -92,6 +93,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, @@ -131,7 +133,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) memset(s, 0, sizeof(*s)); - for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { + for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) { struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; @@ -144,6 +146,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_bytes += rq_stats->bytes; s->rx_lro_packets += rq_stats->lro_packets; s->rx_lro_bytes += rq_stats->lro_bytes; + s->rx_ecn_mark += rq_stats->ecn_mark; s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_complete += rq_stats->csum_complete; @@ -168,6 +171,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; s->rx_congst_umr += rq_stats->congst_umr; + s->rx_arfs_err += rq_stats->arfs_err; s->ch_events += ch_stats->events; s->ch_poll += ch_stats->poll; s->ch_arm += ch_stats->arm; @@ -610,46 +614,82 @@ static const struct counter_desc pport_phy_statistical_stats_desc[] = { { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, }; -#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc) +static const struct counter_desc +pport_phy_statistical_err_lanes_stats_desc[] = { + { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) }, + { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) }, + { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) }, + { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) }, +}; + +#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \ + ARRAY_SIZE(pport_phy_statistical_stats_desc) +#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \ + ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc) static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv) { + struct mlx5_core_dev *mdev = priv->mdev; + int num_stats; + /* "1" for link_down_events special counter */ - return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ? - NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1; + num_stats = 1; + + num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ? + NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0; + + num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ? 
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0; + + return num_stats; } static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) { + struct mlx5_core_dev *mdev = priv->mdev; int i; strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy"); - if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) return idx; for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_phy_statistical_stats_desc[i].format); + + if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_err_lanes_stats_desc[i].format); + return idx; } static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) { + struct mlx5_core_dev *mdev = priv->mdev; int i; /* link_down_events_phy has special handling since it is not stored in __be64 format */ data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, counter_set.phys_layer_cntrs.link_down_events); - if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) return idx; for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, pport_phy_statistical_stats_desc, i); + + if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_err_lanes_stats_desc, + i); return idx; } @@ -1144,6 +1184,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, @@ -1158,6 +1199,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, }; static const struct counter_desc sq_stats_desc[] = { @@ -1211,7 +1253,7 @@ static const struct counter_desc ch_stats_desc[] = { static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) { - int max_nch = priv->profile->max_nch(priv->mdev); + int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); return (NUM_RQ_STATS * max_nch) + (NUM_CH_STATS * max_nch) + @@ -1223,7 +1265,7 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) { - int max_nch = priv->profile->max_nch(priv->mdev); + int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); int i, j, tc; for (i = 0; i < max_nch; i++) @@ -1258,7 +1300,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) { - int max_nch = priv->profile->max_nch(priv->mdev); + int max_nch = 
mlx5e_get_netdev_max_channels(priv->netdev); int i, j, tc; for (i = 0; i < max_nch; i++) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a4c035aedd46..77f74ce11280 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -66,6 +66,7 @@ struct mlx5e_sw_stats { u64 tx_nop; u64 rx_lro_packets; u64 rx_lro_bytes; + u64 rx_ecn_mark; u64 rx_removed_vlan_packets; u64 rx_csum_unnecessary; u64 rx_csum_none; @@ -105,6 +106,7 @@ struct mlx5e_sw_stats { u64 rx_cache_busy; u64 rx_cache_waive; u64 rx_congst_umr; + u64 rx_arfs_err; u64 ch_events; u64 ch_poll; u64 ch_arm; @@ -184,6 +186,7 @@ struct mlx5e_rq_stats { u64 csum_none; u64 lro_packets; u64 lro_bytes; + u64 ecn_mark; u64 removed_vlan_packets; u64 xdp_drop; u64 xdp_redirect; @@ -200,6 +203,7 @@ struct mlx5e_rq_stats { u64 cache_busy; u64 cache_waive; u64 congst_umr; + u64 arfs_err; }; struct mlx5e_sq_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 85796727093e..608025ca5c04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -61,6 +61,7 @@ struct mlx5_nic_flow_attr { u32 hairpin_tirn; u8 match_level; struct mlx5_flow_table *hairpin_ft; + struct mlx5_fc *counter; }; #define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1) @@ -73,6 +74,7 @@ enum { MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2), MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3), MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4), + MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 5), }; #define MLX5E_TC_MAX_SPLITS 1 @@ -81,7 +83,7 @@ struct mlx5e_tc_flow { struct rhash_head node; struct mlx5e_priv *priv; u64 cookie; - u8 flags; + u16 flags; struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; struct list_head encap; /* flows sharing the same encap ID */ struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ @@ -100,11 +102,6 @@ struct mlx5e_tc_flow_parse_attr { int mirred_ifindex; }; -enum { - MLX5_HEADER_TYPE_VXLAN = 0x0, - MLX5_HEADER_TYPE_NVGRE = 0x1, -}; - #define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16) @@ -532,7 +529,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv, #define UNKNOWN_MATCH_PRIO 8 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, - struct mlx5_flow_spec *spec, u8 *match_prio) + struct mlx5_flow_spec *spec, u8 *match_prio, + struct netlink_ext_ack *extack) { void *headers_c, *headers_v; u8 prio_val, prio_mask = 0; @@ -540,8 +538,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, #ifdef CONFIG_MLX5_CORE_EN_DCB if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) { - netdev_warn(priv->netdev, - "only PCP trust state supported for hairpin\n"); + NL_SET_ERR_MSG_MOD(extack, + "only PCP trust state supported for hairpin"); return -EOPNOTSUPP; } #endif @@ -557,8 +555,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, if (!vlan_present || !prio_mask) { prio_val = UNKNOWN_MATCH_PRIO; } else if (prio_mask != 0x7) { - netdev_warn(priv->netdev, - "masked priority match not supported for hairpin\n"); + NL_SET_ERR_MSG_MOD(extack, + "masked priority match not supported for hairpin"); return -EOPNOTSUPP; } @@ -568,7 +566,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, - 
struct mlx5e_tc_flow_parse_attr *parse_attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) { int peer_ifindex = parse_attr->mirred_ifindex; struct mlx5_hairpin_params params; @@ -583,12 +582,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) { - netdev_warn(priv->netdev, "hairpin is not supported\n"); + NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported"); return -EOPNOTSUPP; } peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id); - err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio); + err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio, + extack); if (err) return err; hpe = mlx5e_hairpin_get(priv, peer_id, match_prio); @@ -674,29 +674,28 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, } } -static struct mlx5_flow_handle * +static int mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_core_dev *dev = priv->mdev; struct mlx5_flow_destination dest[2] = {}; struct mlx5_flow_act flow_act = { .action = attr->action, - .has_flow_tag = true, .flow_tag = attr->flow_tag, - .encap_id = 0, + .reformat_id = 0, + .flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND, }; struct mlx5_fc *counter = NULL; - struct mlx5_flow_handle *rule; bool table_created = false; int err, dest_ix = 0; if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) { - err = mlx5e_hairpin_flow_add(priv, flow, parse_attr); + err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack); if (err) { - rule = ERR_PTR(err); goto err_add_hairpin_flow; } if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) { @@ -716,22 +715,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(dev, true); if (IS_ERR(counter)) { - rule = ERR_CAST(counter); + err = PTR_ERR(counter); goto err_fc_create; } dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[dest_ix].counter = counter; + dest[dest_ix].counter_id = mlx5_fc_id(counter); dest_ix++; + attr->counter = counter; } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); flow_act.modify_id = attr->mod_hdr_id; kfree(parse_attr->mod_hdr_actions); - if (err) { - rule = ERR_PTR(err); + if (err) goto err_create_mod_hdr_id; - } } if (IS_ERR_OR_NULL(priv->fs.tc.t)) { @@ -753,9 +751,11 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, MLX5E_TC_TABLE_NUM_GROUPS, MLX5E_TC_FT_LEVEL, 0); if (IS_ERR(priv->fs.tc.t)) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to create tc offload table\n"); netdev_err(priv->netdev, "Failed to create tc offload table\n"); - rule = ERR_CAST(priv->fs.tc.t); + err = PTR_ERR(priv->fs.tc.t); goto err_create_ft; } @@ -765,13 +765,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, if (attr->match_level != MLX5_MATCH_NONE) parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, - &flow_act, dest, dest_ix); + flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, + &flow_act, dest, dest_ix); - if (IS_ERR(rule)) + if (IS_ERR(flow->rule[0])) { + err = PTR_ERR(flow->rule[0]); goto err_add_rule; + } - return rule; + return 0; err_add_rule: if (table_created) { @@ -787,7 
+789,7 @@ err_fc_create: if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) mlx5e_hairpin_flow_del(priv, flow); err_add_hairpin_flow: - return rule; + return err; } static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, @@ -796,7 +798,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, struct mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_fc *counter = NULL; - counter = mlx5_flow_rule_counter(flow->rule[0]); + counter = attr->counter; mlx5_del_flow_rules(flow->rule[0]); mlx5_fc_destroy(priv->mdev, counter); @@ -819,30 +821,119 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, struct net_device **encap_dev, - struct mlx5e_tc_flow *flow); + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack); static struct mlx5_flow_handle * +mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr) +{ + struct mlx5_flow_handle *rule; + + rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); + if (IS_ERR(rule)) + return rule; + + if (attr->mirror_count) { + flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr); + if (IS_ERR(flow->rule[1])) { + mlx5_eswitch_del_offloaded_rule(esw, rule, attr); + return flow->rule[1]; + } + } + + flow->flags |= MLX5E_TC_FLOW_OFFLOADED; + return rule; +} + +static void +mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_esw_flow_attr *attr) +{ + flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; + + if (attr->mirror_count) + mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); + + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); +} + +static struct mlx5_flow_handle * +mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *slow_attr) +{ + struct mlx5_flow_handle *rule; + + memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); + slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + slow_attr->mirror_count = 0; + slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; + + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); + if (!IS_ERR(rule)) + flow->flags |= MLX5E_TC_FLOW_SLOW; + + return rule; +} + +static void +mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_esw_flow_attr *slow_attr) +{ + memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); + mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); + flow->flags &= ~MLX5E_TC_FLOW_SLOW; +} + +static int mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + u32 max_chain = mlx5_eswitch_get_chain_range(esw); struct mlx5_esw_flow_attr *attr = flow->esw_attr; + u16 max_prio = mlx5_eswitch_get_prio_range(esw); struct net_device *out_dev, *encap_dev = NULL; - struct mlx5_flow_handle *rule = NULL; + struct mlx5_fc *counter = NULL; struct mlx5e_rep_priv *rpriv; struct mlx5e_priv *out_priv; - int err; + int err = 0, encap_err = 0; + + /* if prios are not supported, keep the old behaviour of using same prio + * for all offloaded rules. 
+ */ + if (!mlx5_eswitch_prios_supported(esw)) + attr->prio = 1; - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) { + if (attr->chain > max_chain) { + NL_SET_ERR_MSG(extack, "Requested chain is out of supported range"); + err = -EOPNOTSUPP; + goto err_max_prio_chain; + } + + if (attr->prio > max_prio) { + NL_SET_ERR_MSG(extack, "Requested priority is out of supported range"); + err = -EOPNOTSUPP; + goto err_max_prio_chain; + } + + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { out_dev = __dev_get_by_index(dev_net(priv->netdev), attr->parse_attr->mirred_ifindex); - err = mlx5e_attach_encap(priv, &parse_attr->tun_info, - out_dev, &encap_dev, flow); - if (err) { - rule = ERR_PTR(err); - if (err != -EAGAIN) - goto err_attach_encap; + encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info, + out_dev, &encap_dev, flow, + extack); + if (encap_err && encap_err != -EAGAIN) { + err = encap_err; + goto err_attach_encap; } out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; @@ -851,49 +942,58 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, } err = mlx5_eswitch_add_vlan_action(esw, attr); - if (err) { - rule = ERR_PTR(err); + if (err) goto err_add_vlan; - } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); kfree(parse_attr->mod_hdr_actions); - if (err) { - rule = ERR_PTR(err); + if (err) goto err_mod_hdr; + } + + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + counter = mlx5_fc_create(esw->dev, true); + if (IS_ERR(counter)) { + err = PTR_ERR(counter); + goto err_create_counter; } + + attr->counter = counter; } - /* we get here if (1) there's no error (rule being null) or when + /* we get here if (1) there's no error or when * (2) there's an encap action and we're on -EAGAIN (no valid neigh) */ - if (rule != ERR_PTR(-EAGAIN)) { - rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); - if (IS_ERR(rule)) - goto err_add_rule; - - if (attr->mirror_count) { - flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr); - if (IS_ERR(flow->rule[1])) - goto err_fwd_rule; - } + if (encap_err == -EAGAIN) { + /* continue with goto slow path rule instead */ + struct mlx5_esw_flow_attr slow_attr; + + flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr); + } else { + flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr); + } + + if (IS_ERR(flow->rule[0])) { + err = PTR_ERR(flow->rule[0]); + goto err_add_rule; } - return rule; -err_fwd_rule: - mlx5_eswitch_del_offloaded_rule(esw, rule, attr); - rule = flow->rule[1]; + return 0; + err_add_rule: + mlx5_fc_destroy(esw->dev, counter); +err_create_counter: if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); err_mod_hdr: mlx5_eswitch_del_vlan_action(esw, attr); err_add_vlan: - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) mlx5e_detach_encap(priv, flow); err_attach_encap: - return rule; +err_max_prio_chain: + return err; } static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, @@ -901,36 +1001,43 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr *attr = flow->esw_attr; + struct mlx5_esw_flow_attr slow_attr; if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { - flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; - if (attr->mirror_count) - mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr); - 
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); + if (flow->flags & MLX5E_TC_FLOW_SLOW) + mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr); + else + mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); } mlx5_eswitch_del_vlan_action(esw, attr); - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) { + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { mlx5e_detach_encap(priv, flow); kvfree(attr->parse_attr); } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); + + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) + mlx5_fc_destroy(esw->dev, attr->counter); } void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_esw_flow_attr slow_attr, *esw_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; struct mlx5e_tc_flow *flow; int err; - err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, - e->encap_size, e->encap_header, - &e->encap_id); + err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type, + e->encap_size, e->encap_header, + MLX5_FLOW_NAMESPACE_FDB, + &e->encap_id); if (err) { mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n", err); @@ -942,26 +1049,20 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, list_for_each_entry(flow, &e->flows, encap) { esw_attr = flow->esw_attr; esw_attr->encap_id = e->encap_id; - flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr); - if (IS_ERR(flow->rule[0])) { - err = PTR_ERR(flow->rule[0]); + spec = &esw_attr->parse_attr->spec; + + /* update from slow path rule to encap rule */ + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", err); continue; } - if (esw_attr->mirror_count) { - flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr); - if (IS_ERR(flow->rule[1])) { - mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr); - err = PTR_ERR(flow->rule[1]); - mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n", - err); - continue; - } - } - - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; + mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr); + flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */ + flow->rule[0] = rule; } } @@ -969,25 +1070,44 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr slow_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; struct mlx5e_tc_flow *flow; + int err; list_for_each_entry(flow, &e->flows, encap) { - if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { - struct mlx5_esw_flow_attr *attr = flow->esw_attr; + spec = &flow->esw_attr->parse_attr->spec; - flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; - if (attr->mirror_count) - mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr); - mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); + /* update from encap rule to slow path rule */ + rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr); + + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n", + err); + continue; } + + mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr); + flow->flags |= 
MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */ + flow->rule[0] = rule; } if (e->flags & MLX5_ENCAP_ENTRY_VALID) { e->flags &= ~MLX5_ENCAP_ENTRY_VALID; - mlx5_encap_dealloc(priv->mdev, e->encap_id); + mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id); } } +static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) +{ + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + return flow->esw_attr->counter; + else + return flow->nic_attr->counter; +} + void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) { struct mlx5e_neigh *m_neigh = &nhe->m_neigh; @@ -1013,7 +1133,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) continue; list_for_each_entry(flow, &e->flows, encap) { if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { - counter = mlx5_flow_rule_counter(flow->rule[0]); + counter = mlx5e_tc_get_counter(flow); mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { neigh_used = true; @@ -1053,7 +1173,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); if (e->flags & MLX5_ENCAP_ENTRY_VALID) - mlx5_encap_dealloc(priv->mdev, e->encap_id); + mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id); hash_del_rcu(&e->encap_hlist); kfree(e->encap_header); @@ -1105,6 +1225,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f) { + struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, @@ -1133,6 +1254,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); else { + NL_SET_ERR_MSG_MOD(extack, + "port isn't an offloaded vxlan udp dport"); netdev_warn(priv->netdev, "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst)); return -EOPNOTSUPP; @@ -1149,6 +1272,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, udp_sport, ntohs(key->src)); } else { /* udp dst port must be given */ vxlan_match_offload_err: + NL_SET_ERR_MSG_MOD(extack, + "IP tunnel decap offload supported only for vxlan, must set UDP dport"); netdev_warn(priv->netdev, "IP tunnel decap offload supported only for vxlan, must set UDP dport\n"); return -EOPNOTSUPP; @@ -1225,6 +1350,16 @@ vxlan_match_offload_err: MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl); + + if (mask->ttl && + !MLX5_CAP_ESW_FLOWTABLE_FDB + (priv->mdev, + ft_field_support.outer_ipv4_ttl)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on TTL is not supported"); + return -EOPNOTSUPP; + } + } /* Enforce DMAC when offloading incoming tunneled flows. 
@@ -1247,6 +1382,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, u8 *match_level) { + struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, @@ -1277,6 +1413,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_TCP) | BIT(FLOW_DISSECTOR_KEY_IP) | BIT(FLOW_DISSECTOR_KEY_ENC_IP))) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", f->dissector->used_keys); return -EOPNOTSUPP; @@ -1553,8 +1690,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (mask->ttl && !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, - ft_field_support.outer_ipv4_ttl)) + ft_field_support.outer_ipv4_ttl)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on TTL is not supported"); return -EOPNOTSUPP; + } if (mask->tos || mask->ttl) *match_level = MLX5_MATCH_L3; @@ -1596,6 +1736,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, udp_dport, ntohs(key->dst)); break; default: + NL_SET_ERR_MSG_MOD(extack, + "Only UDP and TCP transports are supported for L4 matching"); netdev_err(priv->netdev, "Only UDP and TCP transport are supported\n"); return -EINVAL; @@ -1632,6 +1774,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f) { + struct netlink_ext_ack *extack = f->common.extack; struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -1646,6 +1789,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, if (rep->vport != FDB_UPLINK_VPORT && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && esw->offloads.inline_mode < match_level)) { + NL_SET_ERR_MSG_MOD(extack, + "Flow is not offloaded due to min inline setting"); netdev_warn(priv->netdev, "Flow is not offloaded due to min inline setting, required %d actual %d\n", match_level, esw->offloads.inline_mode); @@ -1747,7 +1892,8 @@ static struct mlx5_fields fields[] = { */ static int offload_pedit_fields(struct pedit_headers *masks, struct pedit_headers *vals, - struct mlx5e_tc_flow_parse_attr *parse_attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) { struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; int i, action_size, nactions, max_actions, first, last, next_z; @@ -1786,11 +1932,15 @@ static int offload_pedit_fields(struct pedit_headers *masks, continue; if (s_mask && a_mask) { + NL_SET_ERR_MSG_MOD(extack, + "can't set and add to the same HW field"); printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field); return -EOPNOTSUPP; } if (nactions == max_actions) { + NL_SET_ERR_MSG_MOD(extack, + "too many pedit actions, can't offload"); printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions); return -EOPNOTSUPP; } @@ -1823,6 +1973,8 @@ static int offload_pedit_fields(struct pedit_headers *masks, next_z = find_next_zero_bit(&mask, field_bsize, first); last = find_last_bit(&mask, field_bsize); if (first < next_z && next_z < last) { + NL_SET_ERR_MSG_MOD(extack, + "rewrite of few sub-fields isn't supported"); printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", mask); return -EOPNOTSUPP; @@ -1881,7 +2033,8 @@ static const struct pedit_headers zero_masks = {}; static int parse_tc_pedit_action(struct mlx5e_priv *priv, const struct 
tc_action *a, int namespace, - struct mlx5e_tc_flow_parse_attr *parse_attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) { struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks; int nkeys, i, err = -EOPNOTSUPP; @@ -1899,12 +2052,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, err = -EOPNOTSUPP; /* can't be all optimistic */ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) { - netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n"); + NL_SET_ERR_MSG_MOD(extack, + "legacy pedit isn't offloaded"); goto out_err; } if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) { - netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd); + NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded"); goto out_err; } @@ -1921,13 +2075,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, if (err) goto out_err; - err = offload_pedit_fields(masks, vals, parse_attr); + err = offload_pedit_fields(masks, vals, parse_attr, extack); if (err < 0) goto out_dealloc_parsed_actions; for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { cmd_masks = &masks[cmd]; if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { + NL_SET_ERR_MSG_MOD(extack, + "attempt to offload an unsupported field"); netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd); print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS, 16, 1, cmd_masks, sizeof(zero_masks), true); @@ -1944,19 +2100,26 @@ out_err: return err; } -static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags) +static bool csum_offload_supported(struct mlx5e_priv *priv, + u32 action, + u32 update_flags, + struct netlink_ext_ack *extack) { u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP; /* The HW recalcs checksums only if re-writing headers */ if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { + NL_SET_ERR_MSG_MOD(extack, + "TC csum action is only offloaded with pedit"); netdev_warn(priv->netdev, "TC csum action is only offloaded with pedit\n"); return false; } if (update_flags & ~prot_flags) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload TC csum action for some header/s"); netdev_warn(priv->netdev, "can't offload TC csum action for some header/s - flags %#x\n", update_flags); @@ -1967,7 +2130,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda } static bool modify_header_match_supported(struct mlx5_flow_spec *spec, - struct tcf_exts *exts) + struct tcf_exts *exts, + struct netlink_ext_ack *extack) { const struct tc_action *a; bool modify_ip_header; @@ -2005,6 +2169,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload re-write of non TCP/UDP"); pr_info("can't offload re-write of ip proto %d\n", ip_proto); return false; } @@ -2016,7 +2182,8 @@ out_ok: static bool actions_match_supported(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { u32 actions; @@ -2030,7 +2197,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, return false; if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - return modify_header_match_supported(&parse_attr->spec, 
exts); + return modify_header_match_supported(&parse_attr->spec, exts, + extack); return true; } @@ -2043,15 +2211,16 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) fmdev = priv->mdev; pmdev = peer_priv->mdev; - mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid); - mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid); + fsystem_guid = mlx5_query_nic_system_image_guid(fmdev); + psystem_guid = mlx5_query_nic_system_image_guid(pmdev); return (fsystem_guid == psystem_guid); } static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_nic_flow_attr *attr = flow->nic_attr; const struct tc_action *a; @@ -2075,7 +2244,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_pedit(a)) { err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr); + parse_attr, extack); if (err) return err; @@ -2086,7 +2255,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_csum(a)) { if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a))) + tcf_csum_update_flags(a), + extack)) continue; return -EOPNOTSUPP; @@ -2102,6 +2272,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; } else { + NL_SET_ERR_MSG_MOD(extack, + "device is not on same HW, can't offload"); netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", peer_dev->name); return -EINVAL; @@ -2113,8 +2285,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, u32 mark = tcf_skbedit_mark(a); if (mark & ~MLX5E_TC_FLOW_ID_MASK) { - netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n", - mark); + NL_SET_ERR_MSG_MOD(extack, + "Bad flow mark - only 16 bit is supported"); return -EINVAL; } @@ -2127,7 +2299,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } attr->action = action; - if (!actions_match_supported(priv, exts, parse_attr, flow)) + if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) return -EOPNOTSUPP; return 0; @@ -2331,7 +2503,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, return -ENOMEM; switch (e->tunnel_type) { - case MLX5_HEADER_TYPE_VXLAN: + case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: fl4.flowi4_proto = IPPROTO_UDP; fl4.fl4_dport = tun_key->tp_dst; break; @@ -2375,7 +2547,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, read_unlock_bh(&n->lock); switch (e->tunnel_type) { - case MLX5_HEADER_TYPE_VXLAN: + case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: gen_vxlan_header_ipv4(out_dev, encap_header, ipv4_encap_size, e->h_dest, tos, ttl, fl4.daddr, @@ -2395,8 +2567,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, goto out; } - err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, - ipv4_encap_size, encap_header, &e->encap_id); + err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type, + ipv4_encap_size, encap_header, + MLX5_FLOW_NAMESPACE_FDB, + &e->encap_id); if (err) goto destroy_neigh_entry; @@ -2440,7 +2614,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, return -ENOMEM; switch (e->tunnel_type) { - case MLX5_HEADER_TYPE_VXLAN: + case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: fl6.flowi6_proto = IPPROTO_UDP; fl6.fl6_dport 
= tun_key->tp_dst; break; @@ -2484,7 +2658,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, read_unlock_bh(&n->lock); switch (e->tunnel_type) { - case MLX5_HEADER_TYPE_VXLAN: + case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: gen_vxlan_header_ipv6(out_dev, encap_header, ipv6_encap_size, e->h_dest, tos, ttl, &fl6.daddr, @@ -2505,8 +2679,10 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, goto out; } - err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, - ipv6_encap_size, encap_header, &e->encap_id); + err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type, + ipv6_encap_size, encap_header, + MLX5_FLOW_NAMESPACE_FDB, + &e->encap_id); if (err) goto destroy_neigh_entry; @@ -2529,7 +2705,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, struct net_device **encap_dev, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; unsigned short family = ip_tunnel_info_af(tun_info); @@ -2547,6 +2724,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, /* setting udp src port isn't supported */ if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) { vxlan_encap_offload_err: + NL_SET_ERR_MSG_MOD(extack, + "must set udp dst port and not set udp src port"); netdev_warn(priv->netdev, "must set udp dst port and not set udp src port\n"); return -EOPNOTSUPP; @@ -2554,8 +2733,10 @@ vxlan_encap_offload_err: if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { - tunnel_type = MLX5_HEADER_TYPE_VXLAN; + tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN; } else { + NL_SET_ERR_MSG_MOD(extack, + "port isn't an offloaded vxlan udp dport"); netdev_warn(priv->netdev, "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst)); return -EOPNOTSUPP; @@ -2660,8 +2841,10 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; struct ip_tunnel_info *info = NULL; @@ -2686,7 +2869,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_pedit(a)) { err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, - parse_attr); + parse_attr, extack); if (err) return err; @@ -2697,7 +2880,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_csum(a)) { if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a))) + tcf_csum_update_flags(a), + extack)) continue; return -EOPNOTSUPP; @@ -2710,6 +2894,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, out_dev = tcf_mirred_dev(a); if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + NL_SET_ERR_MSG_MOD(extack, + "can't support more output ports, can't offload forwarding"); pr_err("can't support more than %d output ports, can't offload forwarding\n", attr->out_count); return -EOPNOTSUPP; @@ -2728,11 +2914,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, parse_attr->mirred_ifindex = out_dev->ifindex; parse_attr->tun_info = *info; attr->parse_attr = parse_attr; - action |= 
MLX5_FLOW_CONTEXT_ACTION_ENCAP | + action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; /* attr->out_rep is resolved when we handle encap */ } else { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", priv->netdev->name, out_dev->name); return -EINVAL; @@ -2765,14 +2953,35 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, continue; } + if (is_tcf_gact_goto_chain(a)) { + u32 dest_chain = tcf_gact_goto_chain_index(a); + u32 max_chain = mlx5_eswitch_get_chain_range(esw); + + if (dest_chain <= attr->chain) { + NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported"); + return -EOPNOTSUPP; + } + if (dest_chain > max_chain) { + NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range"); + return -EOPNOTSUPP; + } + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->dest_chain = dest_chain; + + continue; + } + return -EINVAL; } attr->action = action; - if (!actions_match_supported(priv, exts, parse_attr, flow)) + if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) return -EOPNOTSUPP; if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + NL_SET_ERR_MSG_MOD(extack, + "current firmware doesn't support split rule for port mirroring"); netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); return -EOPNOTSUPP; } @@ -2780,9 +2989,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return 0; } -static void get_flags(int flags, u8 *flow_flags) +static void get_flags(int flags, u16 *flow_flags) { - u8 __flow_flags = 0; + u16 __flow_flags = 0; if (flags & MLX5E_TC_INGRESS) __flow_flags |= MLX5E_TC_FLOW_INGRESS; @@ -2811,31 +3020,15 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv) return &priv->fs.tc.ht; } -int mlx5e_configure_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f, int flags) +static int +mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, + struct tc_cls_flower_offload *f, u16 flow_flags, + struct mlx5e_tc_flow_parse_attr **__parse_attr, + struct mlx5e_tc_flow **__flow) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; - struct rhashtable *tc_ht = get_tc_ht(priv); struct mlx5e_tc_flow *flow; - int attr_size, err = 0; - u8 flow_flags = 0; - - get_flags(flags, &flow_flags); - - flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); - if (flow) { - netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie); - return 0; - } - - if (esw && esw->mode == SRIOV_OFFLOADS) { - flow_flags |= MLX5E_TC_FLOW_ESWITCH; - attr_size = sizeof(struct mlx5_esw_flow_attr); - } else { - flow_flags |= MLX5E_TC_FLOW_NIC; - attr_size = sizeof(struct mlx5_nic_flow_attr); - } + int err; flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL); parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); @@ -2849,45 +3042,161 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, flow->priv = priv; err = parse_cls_flower(priv, flow, &parse_attr->spec, f); - if (err < 0) + if (err) goto err_free; - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { - err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); - if (err < 0) - goto err_free; - flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); - } else { - err = 
parse_tc_nic_actions(priv, f->exts, parse_attr, flow); - if (err < 0) - goto err_free; - flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow); - } + *__flow = flow; + *__parse_attr = parse_attr; - if (IS_ERR(flow->rule[0])) { - err = PTR_ERR(flow->rule[0]); - if (err != -EAGAIN) - goto err_free; - } + return 0; - if (err != -EAGAIN) - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; +err_free: + kfree(flow); + kvfree(parse_attr); + return err; +} + +static int +mlx5e_add_fdb_flow(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f, + u16 flow_flags, + struct mlx5e_tc_flow **__flow) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5e_tc_flow *flow; + int attr_size, err; - if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) || - !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) + flow_flags |= MLX5E_TC_FLOW_ESWITCH; + attr_size = sizeof(struct mlx5_esw_flow_attr); + err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, + &parse_attr, &flow); + if (err) + goto out; + + flow->esw_attr->chain = f->common.chain_index; + flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; + err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack); + if (err) + goto err_free; + + err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack); + if (err) + goto err_free; + + if (!(flow->esw_attr->action & + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)) kvfree(parse_attr); - err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params); - if (err) { - mlx5e_tc_del_flow(priv, flow); - kfree(flow); - } + *__flow = flow; + + return 0; +err_free: + kfree(flow); + kvfree(parse_attr); +out: return err; +} + +static int +mlx5e_add_nic_flow(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f, + u16 flow_flags, + struct mlx5e_tc_flow **__flow) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5e_tc_flow *flow; + int attr_size, err; + + /* multi-chain not supported for NIC rules */ + if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common)) + return -EOPNOTSUPP; + + flow_flags |= MLX5E_TC_FLOW_NIC; + attr_size = sizeof(struct mlx5_nic_flow_attr); + err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, + &parse_attr, &flow); + if (err) + goto out; + + err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack); + if (err) + goto err_free; + + err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack); + if (err) + goto err_free; + + flow->flags |= MLX5E_TC_FLOW_OFFLOADED; + kvfree(parse_attr); + *__flow = flow; + + return 0; err_free: + kfree(flow); kvfree(parse_attr); +out: + return err; +} + +static int +mlx5e_tc_add_flow(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f, + int flags, + struct mlx5e_tc_flow **flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + u16 flow_flags; + int err; + + get_flags(flags, &flow_flags); + + if (!tc_can_offload_extack(priv->netdev, f->common.extack)) + return -EOPNOTSUPP; + + if (esw && esw->mode == SRIOV_OFFLOADS) + err = mlx5e_add_fdb_flow(priv, f, flow_flags, flow); + else + err = mlx5e_add_nic_flow(priv, f, flow_flags, flow); + + return err; +} + +int mlx5e_configure_flower(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f, int flags) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct rhashtable *tc_ht = get_tc_ht(priv); + struct mlx5e_tc_flow *flow; + int err = 0; + + flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); + if (flow) { + NL_SET_ERR_MSG_MOD(extack, + 
"flow cookie already exists, ignoring"); + netdev_warn_once(priv->netdev, + "flow cookie %lx already exists, ignoring\n", + f->cookie); + goto out; + } + + err = mlx5e_tc_add_flow(priv, f, flags, &flow); + if (err) + goto out; + + err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params); + if (err) + goto err_free; + + return 0; + +err_free: + mlx5e_tc_del_flow(priv, flow); kfree(flow); +out: return err; } @@ -2938,7 +3247,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED)) return 0; - counter = mlx5_flow_rule_counter(flow->rule[0]); + counter = mlx5e_tc_get_counter(flow); if (!counter) return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index ae73ea992845..6dacaeba2fbf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -290,10 +290,9 @@ dma_unmap_wqe_err: static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq, - u16 pi, u16 frag_pi) + u16 pi, u16 nnops) { struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; - u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; edge_wi = wi + nnops; @@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe_info *wi; struct mlx5e_sq_stats *stats = sq->stats; + u16 headlen, ihs, contig_wqebbs_room; u16 ds_cnt, ds_cnt_inl = 0; - u16 headlen, ihs, frag_pi; u8 num_wqebbs, opcode; u32 num_bytes; int num_dma; @@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, } num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); - frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); - if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { - mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); + contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); + if (unlikely(contig_wqebbs_room < num_wqebbs)) { + mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); mlx5e_sq_fetch_wqe(sq, &wqe, &pi); } @@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe_info *wi; struct mlx5e_sq_stats *stats = sq->stats; - u16 headlen, ihs, pi, frag_pi; + u16 headlen, ihs, pi, contig_wqebbs_room; u16 ds_cnt, ds_cnt_inl = 0; u8 num_wqebbs, opcode; u32 num_bytes; @@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, } num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); - frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); - if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); + if (unlikely(contig_wqebbs_room < num_wqebbs)) { + mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); - mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); } - mlx5i_sq_fetch_wqe(sq, &wqe, &pi); + mlx5i_sq_fetch_wqe(sq, &wqe, pi); /* fill wqe */ wi = &sq->db.wqe_info[pi]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 48864f4988a4..c1e1a16a9b07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq) case MLX5_PFAULT_SUBTYPE_WQE: /* WQE based event */ pfault->type = - be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24; + (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7; pfault->token 
= be32_to_cpu(pf_eqe->wqe.token); pfault->wqe.wq_num = diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ea7dedc2d5ad..d004957328f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -263,7 +263,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) esw_debug(dev, "Create FDB log_max_size(%d)\n", MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + root_ns = mlx5_get_fdb_sub_ns(dev, 0); if (!root_ns) { esw_warn(dev, "Failed to get FDB flow namespace\n"); return -EOPNOTSUPP; @@ -1198,7 +1198,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, if (counter) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - drop_ctr_dst.counter = counter; + drop_ctr_dst.counter_id = mlx5_fc_id(counter); dst = &drop_ctr_dst; dest_num++; } @@ -1285,7 +1285,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, if (counter) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - drop_ctr_dst.counter = counter; + drop_ctr_dst.counter_id = mlx5_fc_id(counter); dst = &drop_ctr_dst; dest_num++; } @@ -1746,7 +1746,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->enabled_vports = 0; esw->mode = SRIOV_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; - if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index c17bfcab517c..aaafc9f17115 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -59,6 +59,10 @@ #define mlx5_esw_has_fwd_fdb(dev) \ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) +#define FDB_MAX_CHAIN 3 +#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) +#define FDB_MAX_PRIO 16 + struct vport_ingress { struct mlx5_flow_table *acl; struct mlx5_flow_group *allow_untagged_spoofchk_grp; @@ -120,6 +124,13 @@ struct mlx5_vport { u16 enabled_events; }; +enum offloads_fdb_flags { + ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0), +}; + +extern const unsigned int ESW_POOLS[4]; + +#define PRIO_LEVELS 2 struct mlx5_eswitch_fdb { union { struct legacy_fdb { @@ -130,16 +141,24 @@ struct mlx5_eswitch_fdb { } legacy; struct offloads_fdb { - struct mlx5_flow_table *fast_fdb; - struct mlx5_flow_table *fwd_fdb; struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *miss_grp; struct mlx5_flow_handle *miss_rule_uni; struct mlx5_flow_handle *miss_rule_multi; int vlan_push_pop_refcount; + + struct { + struct mlx5_flow_table *fdb; + u32 num_rules; + } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS]; + /* Protects fdb_prio table */ + struct mutex fdb_prio_lock; + + int fdb_left[ARRAY_SIZE(ESW_POOLS)]; } offloads; }; + u32 flags; }; struct mlx5_esw_offload { @@ -181,6 +200,7 @@ struct mlx5_eswitch { struct mlx5_esw_offload offloads; int mode; + int nvports; }; void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); @@ -228,9 +248,23 @@ void mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, struct mlx5_esw_flow_attr *attr); +void 
+mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *attr); + +bool +mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw); + +u16 +mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw); + +u32 +mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw); struct mlx5_flow_handle * -mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); +mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, + struct mlx5_flow_destination *dest); enum { SET_VLAN_STRIP = BIT(0), @@ -265,15 +299,22 @@ struct mlx5_esw_flow_attr { u32 encap_id; u32 mod_hdr_id; u8 match_level; + struct mlx5_fc *counter; + u32 chain; + u16 prio; + u32 dest_chain; struct mlx5e_tc_flow_parse_attr *parse_attr; }; -int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode); +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); -int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode); +int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode); -int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap); +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, + struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); @@ -314,6 +355,11 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {} static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; } static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {} + +#define FDB_MAX_CHAIN 1 +#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) +#define FDB_MAX_PRIO 1 + #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 3028e8d90920..9eac137790f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -37,33 +37,59 @@ #include <linux/mlx5/fs.h> #include "mlx5_core.h" #include "eswitch.h" +#include "en.h" +#include "fs_core.h" enum { FDB_FAST_PATH = 0, FDB_SLOW_PATH }; +#define fdb_prio_table(esw, chain, prio, level) \ + (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)] + +static struct mlx5_flow_table * +esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level); +static void +esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level); + +bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw) +{ + return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)); +} + +u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) +{ + if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) + return FDB_MAX_CHAIN; + + return 0; +} + +u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) +{ + if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) + return FDB_MAX_PRIO; + + return 1; +} + struct mlx5_flow_handle * 
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr) { struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; - struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_table *ft = NULL; - struct mlx5_fc *counter = NULL; + struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; + bool mirror = !!(attr->mirror_count); struct mlx5_flow_handle *rule; + struct mlx5_flow_table *fdb; int j, i = 0; void *misc; if (esw->mode != SRIOV_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); - if (attr->mirror_count) - ft = esw->fdb_table.offloads.fwd_fdb; - else - ft = esw->fdb_table.offloads.fast_fdb; - flow_act.action = attr->action; /* if per flow vlan pop/push is emulated, don't set that into the firmware */ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) @@ -81,23 +107,33 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - for (j = attr->mirror_count; j < attr->out_count; j++) { - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[i].vport.num = attr->out_rep[j]->vport; - dest[i].vport.vhca_id = - MLX5_CAP_GEN(attr->out_mdev[j], vhca_id); - dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); + if (attr->dest_chain) { + struct mlx5_flow_table *ft; + + ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0); + if (IS_ERR(ft)) { + rule = ERR_CAST(ft); + goto err_create_goto_table; + } + + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = ft; i++; + } else { + for (j = attr->mirror_count; j < attr->out_count; j++) { + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest[i].vport.num = attr->out_rep[j]->vport; + dest[i].vport.vhca_id = + MLX5_CAP_GEN(attr->out_mdev[j], vhca_id); + dest[i].vport.vhca_id_valid = + !!MLX5_CAP_ESW(esw->dev, merged_eswitch); + i++; + } } } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { - counter = mlx5_fc_create(esw->dev, true); - if (IS_ERR(counter)) { - rule = ERR_CAST(counter); - goto err_counter_alloc; - } dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[i].counter = counter; + dest[i].counter_id = mlx5_fc_id(attr->counter); i++; } @@ -127,10 +163,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) - flow_act.encap_id = attr->encap_id; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) + flow_act.reformat_id = attr->encap_id; - rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i); + fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!mirror); + if (IS_ERR(fdb)) { + rule = ERR_CAST(fdb); + goto err_esw_get; + } + + rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i); if (IS_ERR(rule)) goto err_add_rule; else @@ -139,8 +181,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, return rule; err_add_rule: - mlx5_fc_destroy(esw->dev, counter); -err_counter_alloc: + esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror); +err_esw_get: + if (attr->dest_chain) + esw_put_prio_table(esw, attr->dest_chain, 1, 0); +err_create_goto_table: return rule; } @@ -150,11 +195,25 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr) { struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; - struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; + struct mlx5_flow_table 
*fast_fdb; + struct mlx5_flow_table *fwd_fdb; struct mlx5_flow_handle *rule; void *misc; int i; + fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0); + if (IS_ERR(fast_fdb)) { + rule = ERR_CAST(fast_fdb); + goto err_get_fast; + } + + fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1); + if (IS_ERR(fwd_fdb)) { + rule = ERR_CAST(fwd_fdb); + goto err_get_fwd; + } + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; for (i = 0; i < attr->mirror_count; i++) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; @@ -164,7 +223,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); } dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[i].ft = esw->fdb_table.offloads.fwd_fdb, + dest[i].ft = fwd_fdb, i++; misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); @@ -187,25 +246,57 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS; - rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i); + rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); - if (!IS_ERR(rule)) - esw->offloads.num_flows++; + if (IS_ERR(rule)) + goto add_err; + esw->offloads.num_flows++; + + return rule; +add_err: + esw_put_prio_table(esw, attr->chain, attr->prio, 1); +err_get_fwd: + esw_put_prio_table(esw, attr->chain, attr->prio, 0); +err_get_fast: return rule; } +static void +__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *attr, + bool fwd_rule) +{ + bool mirror = (attr->mirror_count > 0); + + mlx5_del_flow_rules(rule); + esw->offloads.num_flows--; + + if (fwd_rule) { + esw_put_prio_table(esw, attr->chain, attr->prio, 1); + esw_put_prio_table(esw, attr->chain, attr->prio, 0); + } else { + esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror); + if (attr->dest_chain) + esw_put_prio_table(esw, attr->dest_chain, 1, 0); + } +} + void mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, struct mlx5_esw_flow_attr *attr) { - struct mlx5_fc *counter = NULL; + __mlx5_eswitch_del_rule(esw, rule, attr, false); +} - counter = mlx5_flow_rule_counter(rule); - mlx5_del_flow_rules(rule); - mlx5_fc_destroy(esw->dev, counter); - esw->offloads.num_flows--; +void +mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *attr) +{ + __mlx5_eswitch_del_rule(esw, rule, attr, true); } static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) @@ -294,7 +385,8 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); - fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); + fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && + !attr->dest_chain); err = esw_add_vlan_action_check(attr, push, pop, fwd); if (err) @@ -501,74 +593,170 @@ out: #define ESW_OFFLOADS_NUM_GROUPS 4 -static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) +/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS), + * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated + * for each flow table pool. We can allocate up to 16M of each pool, + * and we keep track of how much we used via put/get_sz_to_pool. + * Firmware doesn't report any of this for now. 
+ * ESW_POOL is expected to be sorted from large to small + */ +#define ESW_SIZE (16 * 1024 * 1024) +const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024, + 64 * 1024, 4 * 1024 }; + +static int +get_sz_from_pool(struct mlx5_eswitch *esw) +{ + int sz = 0, i; + + for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) { + if (esw->fdb_table.offloads.fdb_left[i]) { + --esw->fdb_table.offloads.fdb_left[i]; + sz = ESW_POOLS[i]; + break; + } + } + + return sz; +} + +static void +put_sz_to_pool(struct mlx5_eswitch *esw, int sz) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) { + if (sz >= ESW_POOLS[i]) { + ++esw->fdb_table.offloads.fdb_left[i]; + break; + } + } +} + +static struct mlx5_flow_table * +create_next_size_table(struct mlx5_eswitch *esw, + struct mlx5_flow_namespace *ns, + u16 table_prio, + int level, + u32 flags) +{ + struct mlx5_flow_table *fdb; + int sz; + + sz = get_sz_from_pool(esw); + if (!sz) + return ERR_PTR(-ENOSPC); + + fdb = mlx5_create_auto_grouped_flow_table(ns, + table_prio, + sz, + ESW_OFFLOADS_NUM_GROUPS, + level, + flags); + if (IS_ERR(fdb)) { + esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n", + (int)PTR_ERR(fdb), table_prio, level, sz); + put_sz_to_pool(esw, sz); + } + + return fdb; +} + +static struct mlx5_flow_table * +esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) { struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *fdb = NULL; - int esw_size, err = 0; + struct mlx5_flow_namespace *ns; + int table_prio, l = 0; u32 flags = 0; - u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | - MLX5_CAP_GEN(dev, max_flow_counter_15_0); - root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); - if (!root_ns) { - esw_warn(dev, "Failed to get FDB flow namespace\n"); - err = -EOPNOTSUPP; - goto out_namespace; - } + if (chain == FDB_SLOW_PATH_CHAIN) + return esw->fdb_table.offloads.slow_fdb; - esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), - max_flow_counter, ESW_OFFLOADS_NUM_GROUPS); + mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); - esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS, - 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + fdb = fdb_prio_table(esw, chain, prio, level).fdb; + if (fdb) { + /* take ref on earlier levels as well */ + while (level >= 0) + fdb_prio_table(esw, chain, prio, level--).num_rules++; + mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); + return fdb; + } - if (mlx5_esw_has_fwd_fdb(dev)) - esw_size >>= 1; + ns = mlx5_get_fdb_sub_ns(dev, chain); + if (!ns) { + esw_warn(dev, "Failed to get FDB sub namespace\n"); + mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); + return ERR_PTR(-EOPNOTSUPP); + } if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) - flags |= MLX5_FLOW_TABLE_TUNNEL_EN; + flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); - fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, - esw_size, - ESW_OFFLOADS_NUM_GROUPS, 0, - flags); - if (IS_ERR(fdb)) { - err = PTR_ERR(fdb); - esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); - goto out_namespace; - } - esw->fdb_table.offloads.fast_fdb = fdb; + table_prio = (chain * FDB_MAX_PRIO) + prio - 1; + + /* create earlier levels for correct fs_core lookup when + * connecting tables + */ + for (l = 0; l <= level; l++) { + if 
(fdb_prio_table(esw, chain, prio, l).fdb) { + fdb_prio_table(esw, chain, prio, l).num_rules++; + continue; + } - if (!mlx5_esw_has_fwd_fdb(dev)) - goto out_namespace; + fdb = create_next_size_table(esw, ns, table_prio, l, flags); + if (IS_ERR(fdb)) { + l--; + goto err_create_fdb; + } - fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, - esw_size, - ESW_OFFLOADS_NUM_GROUPS, 1, - flags); - if (IS_ERR(fdb)) { - err = PTR_ERR(fdb); - esw_warn(dev, "Failed to create fwd table err %d\n", err); - goto out_ft; + fdb_prio_table(esw, chain, prio, l).fdb = fdb; + fdb_prio_table(esw, chain, prio, l).num_rules = 1; } - esw->fdb_table.offloads.fwd_fdb = fdb; - return err; + mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); + return fdb; -out_ft: - mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); -out_namespace: - return err; +err_create_fdb: + mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); + if (l >= 0) + esw_put_prio_table(esw, chain, prio, l); + + return fdb; +} + +static void +esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) +{ + int l; + + if (chain == FDB_SLOW_PATH_CHAIN) + return; + + mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); + + for (l = level; l >= 0; l--) { + if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0) + continue; + + put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte); + mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb); + fdb_prio_table(esw, chain, prio, l).fdb = NULL; + } + + mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); } -static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw) +static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw) { - if (mlx5_esw_has_fwd_fdb(esw->dev)) - mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb); - mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); + /* If lazy creation isn't supported, deref the fast path tables */ + if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) { + esw_put_prio_table(esw, 0, 1, 1); + esw_put_prio_table(esw, 0, 1, 0); + } } #define MAX_PF_SQ 256 @@ -579,12 +767,13 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_core_dev *dev = esw->dev; + u32 *flow_group_in, max_flow_counter; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *fdb = NULL; - int table_size, ix, err = 0; + int table_size, ix, err = 0, i; struct mlx5_flow_group *g; + u32 flags = 0, fdb_max; void *match_criteria; - u32 *flow_group_in; u8 *dmac; esw_debug(esw->dev, "Create offloads FDB Tables\n"); @@ -599,12 +788,29 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) goto ns_err; } - err = esw_create_offloads_fast_fdb_table(esw); - if (err) - goto fast_fdb_err; + max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | + MLX5_CAP_GEN(dev, max_flow_counter_15_0); + fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); + + esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), + max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, + fdb_max); + + for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) + esw->fdb_table.offloads.fdb_left[i] = + ESW_POOLS[i] <= fdb_max ? 
ESW_SIZE / ESW_POOLS[i] : 0; table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2; + /* create the slow path fdb with encap set, so further table instances + * can be created at run time while VFs are probed if the FW allows that. + */ + if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) + flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); + + ft_attr.flags = flags; ft_attr.max_fte = table_size; ft_attr.prio = FDB_SLOW_PATH; @@ -616,6 +822,18 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) } esw->fdb_table.offloads.slow_fdb = fdb; + /* If lazy creation isn't supported, open the fast path tables now */ + if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) && + esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { + esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; + esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n"); + esw_get_prio_table(esw, 0, 1, 0); + esw_get_prio_table(esw, 0, 1, 1); + } else { + esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n"); + esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; + } + /* create send-to-vport group */ memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, @@ -663,6 +881,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) if (err) goto miss_rule_err; + esw->nvports = nvports; kvfree(flow_group_in); return 0; @@ -671,10 +890,9 @@ miss_rule_err: miss_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); send_vport_err: + esw_destroy_offloads_fast_fdb_tables(esw); mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); slow_fdb_err: - esw_destroy_offloads_fast_fdb_table(esw); -fast_fdb_err: ns_err: kvfree(flow_group_in); return err; @@ -682,7 +900,7 @@ ns_err: static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) { - if (!esw->fdb_table.offloads.fast_fdb) + if (!esw->fdb_table.offloads.slow_fdb) return; esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); @@ -692,7 +910,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); - esw_destroy_offloads_fast_fdb_table(esw); + esw_destroy_offloads_fast_fdb_tables(esw); } static int esw_create_offloads_table(struct mlx5_eswitch *esw) @@ -775,10 +993,10 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) } struct mlx5_flow_handle * -mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) +mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, + struct mlx5_flow_destination *dest) { struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle *flow_rule; struct mlx5_flow_spec *spec; void *misc; @@ -796,12 +1014,10 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - dest.tir_num = tirn; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, - &flow_act, &dest, 1); + &flow_act, dest, 1); if (IS_ERR(flow_rule)) { esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); goto out; @@ -812,29 +1028,35 @@ out: 
return flow_rule; } -static int esw_offloads_start(struct mlx5_eswitch *esw) +static int esw_offloads_start(struct mlx5_eswitch *esw, + struct netlink_ext_ack *extack) { int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; if (esw->mode != SRIOV_LEGACY) { - esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); + NL_SET_ERR_MSG_MOD(extack, + "Can't set offloads mode, SRIOV legacy not enabled"); return -EINVAL; } mlx5_eswitch_disable_sriov(esw); err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); if (err) { - esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch to offloads"); err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); - if (err1) - esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to legacy"); + } } if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { if (mlx5_eswitch_inline_mode_get(esw, num_vfs, &esw->offloads.inline_mode)) { esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; - esw_warn(esw->dev, "Inline mode is different between vports\n"); + NL_SET_ERR_MSG_MOD(extack, + "Inline mode is different between vports"); } } return err; @@ -945,6 +1167,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) { int err; + mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); + err = esw_create_offloads_fdb_tables(esw, nvports); if (err) return err; @@ -975,17 +1199,20 @@ create_ft_err: return err; } -static int esw_offloads_stop(struct mlx5_eswitch *esw) +static int esw_offloads_stop(struct mlx5_eswitch *esw, + struct netlink_ext_ack *extack) { int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; mlx5_eswitch_disable_sriov(esw); err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); if (err) { - esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err); + NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); - if (err1) - esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to offloads"); + } } /* enable back PF RoCE */ @@ -1094,7 +1321,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) return 0; } -int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); u16 cur_mlx5_mode, mlx5_mode = 0; @@ -1113,9 +1341,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) return 0; if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) - return esw_offloads_start(dev->priv.eswitch); + return esw_offloads_start(dev->priv.eswitch, extack); else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) - return esw_offloads_stop(dev->priv.eswitch); + return esw_offloads_stop(dev->priv.eswitch, extack); else return -EINVAL; } @@ -1132,7 +1360,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); } -int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) +int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1149,14 +1378,15 @@ int 
mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) return 0; /* fall through */ case MLX5_CAP_INLINE_MODE_L2: - esw_warn(dev, "Inline mode can't be set\n"); + NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); return -EOPNOTSUPP; case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: break; } if (esw->offloads.num_flows > 0) { - esw_warn(dev, "Can't set inline mode when flows are configured\n"); + NL_SET_ERR_MSG_MOD(extack, + "Can't set inline mode when flows are configured"); return -EOPNOTSUPP; } @@ -1167,8 +1397,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) for (vport = 1; vport < esw->enabled_vports; vport++) { err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); if (err) { - esw_warn(dev, "Failed to set min inline on vport %d\n", - vport); + NL_SET_ERR_MSG_MOD(extack, + "Failed to set min inline on vport"); goto revert_inline_mode; } } @@ -1234,7 +1464,8 @@ out: return 0; } -int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1245,7 +1476,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) return err; if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && - (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || + (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) return -EOPNOTSUPP; @@ -1261,19 +1492,24 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) return 0; if (esw->offloads.num_flows > 0) { - esw_warn(dev, "Can't set encapsulation when flows are configured\n"); + NL_SET_ERR_MSG_MOD(extack, + "Can't set encapsulation when flows are configured"); return -EOPNOTSUPP; } - esw_destroy_offloads_fast_fdb_table(esw); + esw_destroy_offloads_fdb_tables(esw); esw->offloads.encap = encap; - err = esw_create_offloads_fast_fdb_table(esw); + + err = esw_create_offloads_fdb_tables(esw, esw->nvports); + if (err) { - esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "Failed re-creating fast FDB table"); esw->offloads.encap = !encap; - (void)esw_create_offloads_fast_fdb_table(esw); + (void)esw_create_offloads_fdb_tables(esw, esw->nvports); } + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 5645a4facad2..515e3d6de051 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, return ERR_PTR(res); } - /* Context will be freed by wait func after completion */ + /* Context should be freed by the caller after completion. 
*/ return context; } @@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP); cmd.flags = htonl(flags); context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); - if (IS_ERR(context)) { - err = PTR_ERR(context); - goto out; - } + if (IS_ERR(context)) + return PTR_ERR(context); err = mlx5_fpga_ipsec_cmd_wait(context); if (err) @@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) } out: + kfree(context); return err; } @@ -650,7 +649,7 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev, (match_criteria_enable & ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) || (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) || - flow_act->has_flow_tag) + (flow_act->flags & FLOW_ACT_HAS_TAG)) return false; return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 8e01f818021b..08a891f9aade 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -152,7 +152,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *next_ft, unsigned int *table_id, u32 flags) { - int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN); + int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT); + int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; int err; @@ -169,9 +170,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, } MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en, - en_encap_decap); - MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en, - en_encap_decap); + en_decap); + MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en, + en_encap); switch (op_mod) { case FS_FT_OP_MOD_NORMAL: @@ -343,7 +344,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag); MLX5_SET(flow_context, in_flow_context, action, fte->action.action); - MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id); + MLX5_SET(flow_context, in_flow_context, packet_reformat_id, + fte->action.reformat_id); MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->action.modify_id); @@ -417,7 +419,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, continue; MLX5_SET(flow_counter_list, in_dests, flow_counter_id, - dst->dest_attr.counter->id); + dst->dest_attr.counter_id); in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); list_size++; } @@ -594,62 +596,78 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, *bytes = MLX5_GET64(traffic_counter, stats, octets); } -int mlx5_encap_alloc(struct mlx5_core_dev *dev, - int header_type, - size_t size, - void *encap_header, - u32 *encap_id) +int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, + int reformat_type, + size_t size, + void *reformat_data, + enum mlx5_flow_namespace_type namespace, + u32 *packet_reformat_id) { - int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size); - u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)]; - void *encap_header_in; - void *header; + u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)]; + void *packet_reformat_context_in; + int max_encap_size; + void *reformat; int inlen; int err; u32 *in; + if (namespace == 
MLX5_FLOW_NAMESPACE_FDB) + max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size); + else + max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size); + if (size > max_encap_size) { mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n", size, max_encap_size); return -EINVAL; } - in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size, + in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size, GFP_KERNEL); if (!in) return -ENOMEM; - encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header); - header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header); - inlen = header - (void *)in + size; + packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in, + in, packet_reformat_context); + reformat = MLX5_ADDR_OF(packet_reformat_context_in, + packet_reformat_context_in, + reformat_data); + inlen = reformat - (void *)in + size; memset(in, 0, inlen); - MLX5_SET(alloc_encap_header_in, in, opcode, - MLX5_CMD_OP_ALLOC_ENCAP_HEADER); - MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size); - MLX5_SET(encap_header_in, encap_header_in, header_type, header_type); - memcpy(header, encap_header, size); + MLX5_SET(alloc_packet_reformat_context_in, in, opcode, + MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT); + MLX5_SET(packet_reformat_context_in, packet_reformat_context_in, + reformat_data_size, size); + MLX5_SET(packet_reformat_context_in, packet_reformat_context_in, + reformat_type, reformat_type); + memcpy(reformat, reformat_data, size); memset(out, 0, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); - *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id); + *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out, + out, packet_reformat_id); kfree(in); return err; } +EXPORT_SYMBOL(mlx5_packet_reformat_alloc); -void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id) +void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, + u32 packet_reformat_id) { - u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)]; + u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)]; memset(in, 0, sizeof(in)); - MLX5_SET(dealloc_encap_header_in, in, opcode, - MLX5_CMD_OP_DEALLOC_ENCAP_HEADER); - MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id); + MLX5_SET(dealloc_packet_reformat_context_in, in, opcode, + MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT); + MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id, + packet_reformat_id); mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_packet_reformat_dealloc); int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, u8 namespace, u8 num_actions, @@ -667,9 +685,14 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, table_type = FS_FT_FDB; break; case MLX5_FLOW_NAMESPACE_KERNEL: + case MLX5_FLOW_NAMESPACE_BYPASS: max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions); table_type = FS_FT_NIC_RX; break; + case MLX5_FLOW_NAMESPACE_EGRESS: + max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions); + table_type = FS_FT_NIC_TX; + break; default: return -EOPNOTSUPP; } @@ -702,6 +725,7 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, kfree(in); return err; } +EXPORT_SYMBOL(mlx5_modify_header_alloc); void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id) { @@ -716,6 +740,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev 
*dev, u32 modify_header_id) mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_modify_header_dealloc); static const struct mlx5_flow_cmds mlx5_flow_cmds = { .create_flow_table = mlx5_cmd_create_flow_table, @@ -760,8 +785,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ case FS_FT_FDB: case FS_FT_SNIFFER_RX: case FS_FT_SNIFFER_TX: - return mlx5_fs_cmd_get_fw_cmds(); case FS_FT_NIC_TX: + return mlx5_fs_cmd_get_fw_cmds(); default: return mlx5_fs_cmd_get_stub_cmds(); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 37d114c668b7..9d73eb955f75 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -40,6 +40,7 @@ #include "diag/fs_tracepoint.h" #include "accel/ipsec.h" #include "fpga/ipsec.h" +#include "eswitch.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) @@ -76,6 +77,14 @@ FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \ FS_CAP(flow_table_properties_nic_receive.flow_table_modify)) +#define FS_CHAINING_CAPS_EGRESS \ + FS_REQUIRED_CAPS( \ + FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \ + FS_CAP(flow_table_properties_nic_transmit.modify_root), \ + FS_CAP(flow_table_properties_nic_transmit \ + .identified_miss_table_mode), \ + FS_CAP(flow_table_properties_nic_transmit.flow_table_modify)) + #define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_PRIOS 1 @@ -151,6 +160,17 @@ static struct init_tree_node { } }; +static struct init_tree_node egress_root_fs = { + .type = FS_TYPE_NAMESPACE, + .ar_size = 1, + .children = (struct init_tree_node[]) { + ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, + FS_CHAINING_CAPS_EGRESS, + ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + BY_PASS_PRIO_NUM_LEVELS))), + } +}; + enum fs_i_lock_class { FS_LOCK_GRANDPARENT, FS_LOCK_PARENT, @@ -694,7 +714,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, struct fs_node *iter = list_entry(start, struct fs_node, list); struct mlx5_flow_table *ft = NULL; - if (!root) + if (!root || root->type == FS_TYPE_PRIO_CHAINS) return NULL; list_for_each_advance_continue(iter, &root->children, reverse) { @@ -1388,7 +1408,7 @@ static bool check_conflicting_actions(u32 action1, u32 action2) return false; if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | - MLX5_FLOW_CONTEXT_ACTION_ENCAP | + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | MLX5_FLOW_CONTEXT_ACTION_DECAP | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | @@ -1408,7 +1428,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act return -EEXIST; } - if (flow_act->has_flow_tag && + if ((flow_act->flags & FLOW_ACT_HAS_TAG) && fte->action.flow_tag != flow_act->flow_tag) { mlx5_core_warn(get_dev(&fte->node), "FTE flow tag %u already exists with different flow tag %u\n", @@ -1455,29 +1475,8 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, return handle; } -struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle) +static bool counter_is_valid(u32 action) { - struct mlx5_flow_rule *dst; - struct fs_fte *fte; - - fs_get_obj(fte, handle->rule[0]->node.parent); - - fs_for_each_dst(dst, fte) { - if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) - return dst->dest_attr.counter; - } - - return NULL; -} - -static bool counter_is_valid(struct mlx5_fc *counter, 
u32 action) -{ - if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) - return !counter; - - if (!counter) - return false; - return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)); } @@ -1487,7 +1486,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest, struct mlx5_flow_table *ft) { if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)) - return counter_is_valid(dest->counter, action); + return counter_is_valid(action); if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return true; @@ -1629,6 +1628,8 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, search_again_locked: version = matched_fgs_get_version(match_head); + if (flow_act->flags & FLOW_ACT_NO_APPEND) + goto skip_search; /* Try to find a fg that already contains a matching fte */ list_for_each_entry(iter, match_head, list) { struct fs_fte *fte_tmp; @@ -1645,6 +1646,11 @@ search_again_locked: return rule; } +skip_search: + /* No group with matching fte found, or we skipped the search. + * Try to add a new fte to any matching fg. + */ + /* Check the ft version, for case that new flow group * was added while the fgs weren't locked */ @@ -1975,12 +1981,24 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) fg->id); } +struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, + int n) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + + if (!steering || !steering->fdb_sub_ns) + return NULL; + + return steering->fdb_sub_ns[n]; +} +EXPORT_SYMBOL(mlx5_get_fdb_sub_ns); + struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type) { struct mlx5_flow_steering *steering = dev->priv.steering; struct mlx5_flow_root_namespace *root_ns; - int prio; + int prio = 0; struct fs_prio *fs_prio; struct mlx5_flow_namespace *ns; @@ -1988,40 +2006,29 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return NULL; switch (type) { - case MLX5_FLOW_NAMESPACE_BYPASS: - case MLX5_FLOW_NAMESPACE_LAG: - case MLX5_FLOW_NAMESPACE_OFFLOADS: - case MLX5_FLOW_NAMESPACE_ETHTOOL: - case MLX5_FLOW_NAMESPACE_KERNEL: - case MLX5_FLOW_NAMESPACE_LEFTOVERS: - case MLX5_FLOW_NAMESPACE_ANCHOR: - prio = type; - break; case MLX5_FLOW_NAMESPACE_FDB: if (steering->fdb_root_ns) return &steering->fdb_root_ns->ns; - else - return NULL; + return NULL; case MLX5_FLOW_NAMESPACE_SNIFFER_RX: if (steering->sniffer_rx_root_ns) return &steering->sniffer_rx_root_ns->ns; - else - return NULL; + return NULL; case MLX5_FLOW_NAMESPACE_SNIFFER_TX: if (steering->sniffer_tx_root_ns) return &steering->sniffer_tx_root_ns->ns; - else - return NULL; - case MLX5_FLOW_NAMESPACE_EGRESS: - if (steering->egress_root_ns) - return &steering->egress_root_ns->ns; - else - return NULL; - default: return NULL; + default: + break; + } + + if (type == MLX5_FLOW_NAMESPACE_EGRESS) { + root_ns = steering->egress_root_ns; + } else { /* Must be NIC RX */ + root_ns = steering->root_ns; + prio = type; } - root_ns = steering->root_ns; if (!root_ns) return NULL; @@ -2064,8 +2071,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d } } -static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, - unsigned int prio, int num_levels) +static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns, + unsigned int prio, + int num_levels, + enum fs_node_type type) { struct fs_prio *fs_prio; @@ -2073,7 +2082,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, if (!fs_prio) return 
ERR_PTR(-ENOMEM); - fs_prio->node.type = FS_TYPE_PRIO; + fs_prio->node.type = type; tree_init_node(&fs_prio->node, NULL, del_sw_prio); tree_add_node(&fs_prio->node, &ns->node); fs_prio->num_levels = num_levels; @@ -2083,6 +2092,19 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, return fs_prio; } +static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns, + unsigned int prio, + int num_levels) +{ + return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS); +} + +static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, + unsigned int prio, int num_levels) +{ + return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO); +} + static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace *ns) { @@ -2387,6 +2409,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) cleanup_egress_acls_root_ns(dev); cleanup_ingress_acls_root_ns(dev); cleanup_root_ns(steering->fdb_root_ns); + steering->fdb_root_ns = NULL; + kfree(steering->fdb_sub_ns); + steering->fdb_sub_ns = NULL; cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); cleanup_root_ns(steering->egress_root_ns); @@ -2432,27 +2457,64 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering) static int init_fdb_root_ns(struct mlx5_flow_steering *steering) { - struct fs_prio *prio; + struct mlx5_flow_namespace *ns; + struct fs_prio *maj_prio; + struct fs_prio *min_prio; + int levels; + int chain; + int prio; + int err; steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB); if (!steering->fdb_root_ns) return -ENOMEM; - prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2); - if (IS_ERR(prio)) + steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) * + (FDB_MAX_CHAIN + 1), GFP_KERNEL); + if (!steering->fdb_sub_ns) + return -ENOMEM; + + levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1); + maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 0, + levels); + if (IS_ERR(maj_prio)) { + err = PTR_ERR(maj_prio); goto out_err; + } + + for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) { + ns = fs_create_namespace(maj_prio); + if (IS_ERR(ns)) { + err = PTR_ERR(ns); + goto out_err; + } + + for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) { + min_prio = fs_create_prio(ns, prio, 2); + if (IS_ERR(min_prio)) { + err = PTR_ERR(min_prio); + goto out_err; + } + } + + steering->fdb_sub_ns[chain] = ns; + } - prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1); - if (IS_ERR(prio)) + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1); + if (IS_ERR(maj_prio)) { + err = PTR_ERR(maj_prio); goto out_err; + } set_prio_attrs(steering->fdb_root_ns); return 0; out_err: cleanup_root_ns(steering->fdb_root_ns); + kfree(steering->fdb_sub_ns); + steering->fdb_sub_ns = NULL; steering->fdb_root_ns = NULL; - return PTR_ERR(prio); + return err; } static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport) @@ -2537,16 +2599,23 @@ cleanup_root_ns: static int init_egress_root_ns(struct mlx5_flow_steering *steering) { - struct fs_prio *prio; + int err; steering->egress_root_ns = create_root_ns(steering, FS_FT_NIC_TX); if (!steering->egress_root_ns) return -ENOMEM; - /* create 1 prio*/ - prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1); - return PTR_ERR_OR_ZERO(prio); + err = init_root_tree(steering, &egress_root_fs, + &steering->egress_root_ns->ns.node); + if (err) + goto cleanup; + set_prio_attrs(steering->egress_root_ns); + return 0; +cleanup: + 
cleanup_root_ns(steering->egress_root_ns); + steering->egress_root_ns = NULL; + return err; } int mlx5_init_fs(struct mlx5_core_dev *dev) @@ -2614,7 +2683,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (MLX5_IPSEC_DEV(dev)) { + if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { err = init_egress_root_ns(steering); if (err) goto err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 32070e5d993d..b51ad217da32 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -36,10 +36,23 @@ #include <linux/refcount.h> #include <linux/mlx5/fs.h> #include <linux/rhashtable.h> +#include <linux/llist.h> + +/* FS_TYPE_PRIO_CHAINS is a PRIO that will have namespaces only, + * and those are in parallel to one another when going over them to connect + * a new flow table. Meaning the last flow table in a TYPE_PRIO prio in one + * parallel namespace will not automatically connect to the first flow table + * found in any prio in any next namespace, but skip the entire containing + * TYPE_PRIO_CHAINS prio. + * + * This is used to implement tc chains, each chain of prios is a different + * namespace inside a containing TYPE_PRIO_CHAINS prio. + */ enum fs_node_type { FS_TYPE_NAMESPACE, FS_TYPE_PRIO, + FS_TYPE_PRIO_CHAINS, FS_TYPE_FLOW_TABLE, FS_TYPE_FLOW_GROUP, FS_TYPE_FLOW_ENTRY, @@ -72,6 +85,7 @@ struct mlx5_flow_steering { struct kmem_cache *ftes_cache; struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *fdb_root_ns; + struct mlx5_flow_namespace **fdb_sub_ns; struct mlx5_flow_root_namespace **esw_egress_root_ns; struct mlx5_flow_root_namespace **esw_ingress_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns; @@ -138,8 +152,9 @@ struct mlx5_fc_cache { }; struct mlx5_fc { - struct rb_node node; struct list_head list; + struct llist_node addlist; + struct llist_node dellist; /* last{packets,bytes} members are used when calculating the delta since * last reading @@ -148,7 +163,6 @@ struct mlx5_fc { u64 lastbytes; u32 id; - bool deleted; bool aging; struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 58af6be13dfa..32accd6b041b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -52,11 +52,13 @@ * access to counter list: * - create (user context) * - mlx5_fc_create() only adds to an addlist to be used by - * mlx5_fc_stats_query_work(). addlist is protected by a spinlock. + * mlx5_fc_stats_query_work(). addlist is a lockless single linked list + * that doesn't require any additional synchronization when adding single + * node. * - spawn thread to do the actual destroy * * - destroy (user context) - * - mark a counter as deleted + * - add a counter to lockless dellist * - spawn thread to do the actual del * * - dump (user context) @@ -71,36 +73,55 @@ * elapsed, the thread will actually query the hardware. 
*/ -static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter) +static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev, + u32 id) { - struct rb_node **new = &root->rb_node; - struct rb_node *parent = NULL; - - while (*new) { - struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node); - int result = counter->id - this->id; - - parent = *new; - if (result < 0) - new = &((*new)->rb_left); - else - new = &((*new)->rb_right); - } + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + unsigned long next_id = (unsigned long)id + 1; + struct mlx5_fc *counter; + + rcu_read_lock(); + /* skip counters that are in idr, but not yet in counters list */ + while ((counter = idr_get_next_ul(&fc_stats->counters_idr, + &next_id)) != NULL && + list_empty(&counter->list)) + next_id++; + rcu_read_unlock(); + + return counter ? &counter->list : &fc_stats->counters; +} + +static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev, + struct mlx5_fc *counter) +{ + struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id); + + list_add_tail(&counter->list, next); +} + +static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev, + struct mlx5_fc *counter) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - /* Add new node and rebalance tree. */ - rb_link_node(&counter->node, parent, new); - rb_insert_color(&counter->node, root); + list_del(&counter->list); + + spin_lock(&fc_stats->counters_idr_lock); + WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id)); + spin_unlock(&fc_stats->counters_idr_lock); } -/* The function returns the last node that was queried so the caller +/* The function returns the last counter that was queried so the caller * function can continue calling it till all counters are queried. */ -static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, +static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev, struct mlx5_fc *first, u32 last_id) { + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter = NULL; struct mlx5_cmd_fc_bulk *b; - struct rb_node *node = NULL; + bool more = false; u32 afirst_id; int num; int err; @@ -130,14 +151,16 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, goto out; } - for (node = &first->node; node; node = rb_next(node)) { - struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node); + counter = first; + list_for_each_entry_from(counter, &fc_stats->counters, list) { struct mlx5_fc_cache *c = &counter->cache; u64 packets; u64 bytes; - if (counter->id > last_id) + if (counter->id > last_id) { + more = true; break; + } mlx5_cmd_fc_bulk_get(dev, b, counter->id, &packets, &bytes); @@ -153,7 +176,14 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, out: mlx5_cmd_fc_bulk_free(b); - return node; + return more ? counter : NULL; +} + +static void mlx5_free_fc(struct mlx5_core_dev *dev, + struct mlx5_fc *counter) +{ + mlx5_cmd_fc_free(dev, counter->id); + kfree(counter); } static void mlx5_fc_stats_work(struct work_struct *work) @@ -161,52 +191,36 @@ static void mlx5_fc_stats_work(struct work_struct *work) struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, priv.fc_stats.work.work); struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + /* Take dellist first to ensure that counters cannot be deleted before + * they are inserted. 
+ */ + struct llist_node *dellist = llist_del_all(&fc_stats->dellist); + struct llist_node *addlist = llist_del_all(&fc_stats->addlist); + struct mlx5_fc *counter = NULL, *last = NULL, *tmp; unsigned long now = jiffies; - struct mlx5_fc *counter = NULL; - struct mlx5_fc *last = NULL; - struct rb_node *node; - LIST_HEAD(tmplist); - spin_lock(&fc_stats->addlist_lock); - - list_splice_tail_init(&fc_stats->addlist, &tmplist); - - if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters)) + if (addlist || !list_empty(&fc_stats->counters)) queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval); - spin_unlock(&fc_stats->addlist_lock); - - list_for_each_entry(counter, &tmplist, list) - mlx5_fc_stats_insert(&fc_stats->counters, counter); + llist_for_each_entry(counter, addlist, addlist) + mlx5_fc_stats_insert(dev, counter); - node = rb_first(&fc_stats->counters); - while (node) { - counter = rb_entry(node, struct mlx5_fc, node); + llist_for_each_entry_safe(counter, tmp, dellist, dellist) { + mlx5_fc_stats_remove(dev, counter); - node = rb_next(node); - - if (counter->deleted) { - rb_erase(&counter->node, &fc_stats->counters); - - mlx5_cmd_fc_free(dev, counter->id); - - kfree(counter); - continue; - } - - last = counter; + mlx5_free_fc(dev, counter); } - if (time_before(now, fc_stats->next_query) || !last) + if (time_before(now, fc_stats->next_query) || + list_empty(&fc_stats->counters)) return; + last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list); - node = rb_first(&fc_stats->counters); - while (node) { - counter = rb_entry(node, struct mlx5_fc, node); - - node = mlx5_fc_stats_query(dev, counter, last->id); - } + counter = list_first_entry(&fc_stats->counters, struct mlx5_fc, + list); + while (counter) + counter = mlx5_fc_stats_query(dev, counter, last->id); fc_stats->next_query = now + fc_stats->sampling_interval; } @@ -220,24 +234,38 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&counter->list); err = mlx5_cmd_fc_alloc(dev, &counter->id); if (err) goto err_out; if (aging) { + u32 id = counter->id; + counter->cache.lastuse = jiffies; counter->aging = true; - spin_lock(&fc_stats->addlist_lock); - list_add(&counter->list, &fc_stats->addlist); - spin_unlock(&fc_stats->addlist_lock); + idr_preload(GFP_KERNEL); + spin_lock(&fc_stats->counters_idr_lock); + + err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id, + GFP_NOWAIT); + + spin_unlock(&fc_stats->counters_idr_lock); + idr_preload_end(); + if (err) + goto err_out_alloc; + + llist_add(&counter->addlist, &fc_stats->addlist); mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); } return counter; +err_out_alloc: + mlx5_cmd_fc_free(dev, counter->id); err_out: kfree(counter); @@ -245,6 +273,12 @@ err_out: } EXPORT_SYMBOL(mlx5_fc_create); +u32 mlx5_fc_id(struct mlx5_fc *counter) +{ + return counter->id; +} +EXPORT_SYMBOL(mlx5_fc_id); + void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; @@ -253,13 +287,12 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) return; if (counter->aging) { - counter->deleted = true; + llist_add(&counter->dellist, &fc_stats->dellist); mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); return; } - mlx5_cmd_fc_free(dev, counter->id); - kfree(counter); + mlx5_free_fc(dev, counter); } EXPORT_SYMBOL(mlx5_fc_destroy); @@ -267,9 +300,11 @@ int 
mlx5_init_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - fc_stats->counters = RB_ROOT; - INIT_LIST_HEAD(&fc_stats->addlist); - spin_lock_init(&fc_stats->addlist_lock); + spin_lock_init(&fc_stats->counters_idr_lock); + idr_init(&fc_stats->counters_idr); + INIT_LIST_HEAD(&fc_stats->counters); + init_llist_head(&fc_stats->addlist); + init_llist_head(&fc_stats->dellist); fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); if (!fc_stats->wq) @@ -284,34 +319,22 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct llist_node *tmplist; struct mlx5_fc *counter; struct mlx5_fc *tmp; - struct rb_node *node; cancel_delayed_work_sync(&dev->priv.fc_stats.work); destroy_workqueue(dev->priv.fc_stats.wq); dev->priv.fc_stats.wq = NULL; - list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) { - list_del(&counter->list); - - mlx5_cmd_fc_free(dev, counter->id); + idr_destroy(&fc_stats->counters_idr); - kfree(counter); - } - - node = rb_first(&fc_stats->counters); - while (node) { - counter = rb_entry(node, struct mlx5_fc, node); - - node = rb_next(node); + tmplist = llist_del_all(&fc_stats->addlist); + llist_for_each_entry_safe(counter, tmp, tmplist, addlist) + mlx5_free_fc(dev, counter); - rb_erase(&counter->node, &fc_stats->counters); - - mlx5_cmd_fc_free(dev, counter->id); - - kfree(counter); - } + list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list) + mlx5_free_fc(dev, counter); } int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 41ad24f0de2c..1ab6f7e3bec6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -250,7 +250,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) if (ret) return ret; - force_state = MLX5_GET(teardown_hca_out, out, force_state); + force_state = MLX5_GET(teardown_hca_out, out, state); if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n"); return -EIO; @@ -259,6 +259,54 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) return 0; } +#define MLX5_FAST_TEARDOWN_WAIT_MS 3000 +int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) +{ + unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS; + u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0}; + int state; + int ret; + + if (!MLX5_CAP_GEN(dev, fast_teardown)) { + mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n"); + return -EOPNOTSUPP; + } + + MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); + MLX5_SET(teardown_hca_in, in, profile, + MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN); + + ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (ret) + return ret; + + state = MLX5_GET(teardown_hca_out, out, state); + if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { + mlx5_core_warn(dev, "teardown with fast mode failed\n"); + return -EIO; + } + + mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED); + + /* Loop until device state turns to disable */ + end = jiffies + msecs_to_jiffies(delay_ms); + do { + if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + break; + + cond_resched(); + } while (!time_after(jiffies, end)); + + if (mlx5_get_nic_state(dev) != 
MLX5_NIC_IFC_DISABLED) { + dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n", + mlx5_get_nic_state(dev), delay_ms); + return -EIO; + } + + return 0; +} + enum mlxsw_reg_mcc_instruction { MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9f39aeca863f..43118de8ee99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -59,22 +59,25 @@ enum { }; enum { - MLX5_NIC_IFC_FULL = 0, - MLX5_NIC_IFC_DISABLED = 1, - MLX5_NIC_IFC_NO_DRAM_NIC = 2, - MLX5_NIC_IFC_INVALID = 3 -}; - -enum { MLX5_DROP_NEW_HEALTH_WORK, MLX5_DROP_NEW_RECOVERY_WORK, }; -static u8 get_nic_state(struct mlx5_core_dev *dev) +u8 mlx5_get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; } +void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state) +{ + u32 cur_cmdq_addr_l_sz; + + cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz); + iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) | + state << MLX5_NIC_IFC_OFFSET, + &dev->iseg->cmdq_addr_l_sz); +} + static void trigger_cmd_completions(struct mlx5_core_dev *dev) { unsigned long flags; @@ -103,7 +106,7 @@ static int in_fatal(struct mlx5_core_dev *dev) struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; - if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) return 1; if (ioread32be(&h->fw_ver) == 0xffffffff) @@ -133,7 +136,7 @@ unlock: static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) { - u8 nic_interface = get_nic_state(dev); + u8 nic_interface = mlx5_get_nic_state(dev); switch (nic_interface) { case MLX5_NIC_IFC_FULL: @@ -168,7 +171,7 @@ static void health_recover(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); - nic_state = get_nic_state(dev); + nic_state = mlx5_get_nic_state(dev); if (nic_state == MLX5_NIC_IFC_INVALID) { dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n"); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index e3797a44e074..b59953daf8b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -45,6 +45,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); static const struct net_device_ops mlx5i_netdev_ops = { .ndo_open = mlx5i_open, .ndo_stop = mlx5i_close, + .ndo_get_stats64 = mlx5i_get_stats, .ndo_init = mlx5i_dev_init, .ndo_uninit = mlx5i_dev_cleanup, .ndo_change_mtu = mlx5i_change_mtu, @@ -70,26 +71,25 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, } /* Called directly after IPoIB netdevice was created to initialize SW structs */ -void mlx5i_init(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) +int mlx5i_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); u16 max_mtu; + int err; - /* priv init */ - priv->mdev = mdev; - priv->netdev = netdev; - priv->profile = profile; - priv->ppriv = ppriv; - mutex_init(&priv->state_lock); + err = mlx5e_netdev_init(netdev, priv, 
mdev, profile, ppriv); + if (err) + return err; mlx5_query_port_max_mtu(mdev, &max_mtu, 1); netdev->mtu = max_mtu; mlx5e_build_nic_params(mdev, &priv->channels.params, - profile->max_nch(mdev), netdev->mtu); + mlx5e_get_netdev_max_channels(netdev), + netdev->mtu); mlx5i_build_nic_params(mdev, &priv->channels.params); mlx5e_timestamp_init(priv); @@ -106,12 +106,56 @@ void mlx5i_init(struct mlx5_core_dev *mdev, netdev->netdev_ops = &mlx5i_netdev_ops; netdev->ethtool_ops = &mlx5i_ethtool_ops; + + return 0; } /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */ -static void mlx5i_cleanup(struct mlx5e_priv *priv) +void mlx5i_cleanup(struct mlx5e_priv *priv) +{ + mlx5e_netdev_cleanup(priv->netdev, priv); +} + +static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) +{ + int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); + struct mlx5e_sw_stats s = { 0 }; + int i, j; + + for (i = 0; i < max_nch; i++) { + struct mlx5e_channel_stats *channel_stats; + struct mlx5e_rq_stats *rq_stats; + + channel_stats = &priv->channel_stats[i]; + rq_stats = &channel_stats->rq; + + s.rx_packets += rq_stats->packets; + s.rx_bytes += rq_stats->bytes; + + for (j = 0; j < priv->max_opened_tc; j++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; + + s.tx_packets += sq_stats->packets; + s.tx_bytes += sq_stats->bytes; + s.tx_queue_dropped += sq_stats->dropped; + } + } + + memcpy(&priv->stats.sw, &s, sizeof(s)); +} + +void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { - /* Do nothing .. */ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + struct mlx5e_sw_stats *sstats = &priv->stats.sw; + + mlx5i_grp_sw_update_stats(priv); + + stats->rx_packets = sstats->rx_packets; + stats->rx_bytes = sstats->rx_bytes; + stats->tx_packets = sstats->tx_packets; + stats->tx_bytes = sstats->tx_bytes; + stats->tx_dropped = sstats->tx_queue_dropped; } int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) @@ -306,17 +350,26 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) static int mlx5i_init_rx(struct mlx5e_priv *priv) { + struct mlx5_core_dev *mdev = priv->mdev; int err; + mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_destroy_q_counters; + } + err = mlx5e_create_indirect_rqt(priv); if (err) - return err; + goto err_close_drop_rq; err = mlx5e_create_direct_rqts(priv); if (err) goto err_destroy_indirect_rqts; - err = mlx5e_create_indirect_tirs(priv); + err = mlx5e_create_indirect_tirs(priv, true); if (err) goto err_destroy_direct_rqts; @@ -333,11 +386,15 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, true); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); return err; } @@ -345,9 +402,11 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { mlx5i_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); - mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_indirect_tirs(priv, true); mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); } static const struct mlx5e_profile 
mlx5i_nic_profile = { @@ -360,7 +419,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = { .enable = NULL, /* mlx5i_enable */ .disable = NULL, /* mlx5i_disable */ .update_stats = NULL, /* mlx5i_update_stats */ - .max_nch = mlx5e_get_max_num_channels, .update_carrier = NULL, /* no HW update in IB link */ .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ @@ -592,7 +650,6 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev) mlx5e_detach_netdev(priv); profile->cleanup(priv); - destroy_workqueue(priv->wq); if (!ipriv->sub_interface) { mlx5i_pkey_qpn_ht_cleanup(netdev); @@ -600,58 +657,37 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev) } } -struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, - struct ib_device *ibdev, - const char *name, - void (*setup)(struct net_device *)) +static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev) +{ + return mdev->mlx5e_res.pdn != 0; +} + +static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev) +{ + if (mlx5_is_sub_interface(mdev)) + return mlx5i_pkey_get_profile(); + return &mlx5i_nic_profile; +} + +static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num, + struct net_device *netdev, void *param) { - const struct mlx5e_profile *profile; - struct net_device *netdev; + struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param; + const struct mlx5e_profile *prof = mlx5_get_profile(mdev); struct mlx5i_priv *ipriv; struct mlx5e_priv *epriv; struct rdma_netdev *rn; - bool sub_interface; - int nch; int err; - if (mlx5i_check_required_hca_cap(mdev)) { - mlx5_core_warn(mdev, "Accelerated mode is not supported\n"); - return ERR_PTR(-EOPNOTSUPP); - } - - /* TODO: Need to find a better way to check if child device*/ - sub_interface = (mdev->mlx5e_res.pdn != 0); - - if (sub_interface) - profile = mlx5i_pkey_get_profile(); - else - profile = &mlx5i_nic_profile; - - nch = profile->max_nch(mdev); - - netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv), - name, NET_NAME_UNKNOWN, - setup, - nch * MLX5E_MAX_NUM_TC, - nch); - if (!netdev) { - mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n"); - return NULL; - } - ipriv = netdev_priv(netdev); epriv = mlx5i_epriv(netdev); - epriv->wq = create_singlethread_workqueue("mlx5i"); - if (!epriv->wq) - goto err_free_netdev; - - ipriv->sub_interface = sub_interface; + ipriv->sub_interface = mlx5_is_sub_interface(mdev); if (!ipriv->sub_interface) { err = mlx5i_pkey_qpn_ht_init(netdev); if (err) { mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n"); - goto destroy_wq; + return err; } /* This should only be called once per mdev */ @@ -660,7 +696,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, goto destroy_ht; } - profile->init(mdev, netdev, profile, ipriv); + prof->init(mdev, netdev, prof, ipriv); mlx5e_attach_netdev(epriv); netif_carrier_off(netdev); @@ -676,15 +712,35 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, netdev->priv_destructor = mlx5_rdma_netdev_free; netdev->needs_free_netdev = 1; - return netdev; + return 0; destroy_ht: mlx5i_pkey_qpn_ht_cleanup(netdev); -destroy_wq: - destroy_workqueue(epriv->wq); -err_free_netdev: - free_netdev(netdev); + return err; +} - return NULL; +int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, + struct ib_device *device, + struct rdma_netdev_alloc_params *params) +{ + int nch; + int rc; + + rc = mlx5i_check_required_hca_cap(mdev); + if (rc) + return rc; + + 
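/* Editorial aside (illustrative sketch, not part of the patch): the assignment
 * to *params just below uses a C99 compound literal with designated
 * initializers, which overwrites the whole structure in one statement and
 * zero-initializes any member not named in the literal, so no separate
 * memset() is needed. A minimal standalone example of the same idiom, with
 * hypothetical names:
 */
struct demo_params {
        int txqs;
        int rxqs;
        void *param;
};

static void fill_demo_params(struct demo_params *params, int nch, void *ctx)
{
        /* Members left out of the literal would be implicitly zeroed. */
        *params = (struct demo_params){
                .txqs  = nch * 8,
                .rxqs  = nch,
                .param = ctx,
        };
}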
nch = mlx5e_get_max_num_channels(mdev); + + *params = (struct rdma_netdev_alloc_params){ + .sizeof_priv = sizeof(struct mlx5i_priv) + + sizeof(struct mlx5e_priv), + .txqs = nch * MLX5E_MAX_NUM_TC, + .rxqs = nch, + .param = mdev, + .initialize_rdma_netdev = mlx5_rdma_setup_rn, + }; + + return 0; } -EXPORT_SYMBOL(mlx5_rdma_netdev_alloc); +EXPORT_SYMBOL(mlx5_rdma_rn_get_params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 08eac92fc26c..9165ca567047 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -84,10 +84,11 @@ void mlx5i_dev_cleanup(struct net_device *dev); int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); /* Parent profile functions */ -void mlx5i_init(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv); +int mlx5i_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv); +void mlx5i_cleanup(struct mlx5e_priv *priv); /* Get child interface nic profile */ const struct mlx5e_profile *mlx5i_pkey_get_profile(void); @@ -109,18 +110,18 @@ struct mlx5i_tx_wqe { static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, struct mlx5i_tx_wqe **wqe, - u16 *pi) + u16 pi) { struct mlx5_wq_cyc *wq = &sq->wq; - *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); - *wqe = mlx5_wq_cyc_get_wqe(wq, *pi); + *wqe = mlx5_wq_cyc_get_wqe(wq, pi); memset(*wqe, 0, sizeof(**wqe)); } netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey); void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); #endif /* CONFIG_MLX5_CORE_IPOIB */ #endif /* __MLX5E_IPOB_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 54a188f41f90..b491b8f5fd6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -146,6 +146,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_open = mlx5i_pkey_open, .ndo_stop = mlx5i_pkey_close, .ndo_init = mlx5i_pkey_dev_init, + .ndo_get_stats64 = mlx5i_get_stats, .ndo_uninit = mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, .ndo_do_ioctl = mlx5i_pkey_ioctl, @@ -274,14 +275,17 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu) } /* Called directly after IPoIB netdevice was created to initialize SW structs */ -static void mlx5i_pkey_init(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) +static int mlx5i_pkey_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); + int err; - mlx5i_init(mdev, netdev, profile, ppriv); + err = mlx5i_init(mdev, netdev, profile, ppriv); + if (err) + return err; /* Override parent ndo */ netdev->netdev_ops = &mlx5i_pkey_netdev_ops; @@ -291,12 +295,14 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev, /* Use dummy rqs */ priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + + return 0; } /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */ static void mlx5i_pkey_cleanup(struct mlx5e_priv 
*priv) { - /* Do nothing .. */ + mlx5i_cleanup(priv); } static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv) @@ -345,7 +351,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = { .enable = NULL, .disable = NULL, .update_stats = NULL, - .max_nch = mlx5e_get_max_num_channels, .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ .max_tc = MLX5I_MAX_NUM_TC, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 3f767cde4c1d..0d90b1b4a3d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -111,10 +111,10 @@ static void mlx5_pps_out(struct work_struct *work) for (i = 0; i < clock->ptp_info.n_pins; i++) { u64 tstart; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); tstart = clock->pps_info.start[i]; clock->pps_info.start[i] = 0; - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); if (!tstart) continue; @@ -132,10 +132,10 @@ static void mlx5_timestamp_overflow(struct work_struct *work) overflow_work); unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_read(&clock->tc); mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); schedule_delayed_work(&clock->overflow_work, clock->overflow_period); } @@ -147,10 +147,10 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, u64 ns = timespec64_to_ns(ts); unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_init(&clock->tc, &clock->cycles, ns); mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); return 0; } @@ -162,9 +162,9 @@ static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) u64 ns; unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); ns = timecounter_read(&clock->tc); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); *ts = ns_to_timespec64(ns); @@ -177,10 +177,10 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) ptp_info); unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_adjtime(&clock->tc, delta); mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); return 0; } @@ -203,12 +203,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) adj *= delta; diff = div_u64(adj, 1000000000ULL); - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_read(&clock->tc); clock->cycles.mult = neg_adj ? 
clock->nominal_c_mult - diff : clock->nominal_c_mult + diff; mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); return 0; } @@ -307,12 +307,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, ts.tv_nsec = rq->perout.start.nsec; ns = timespec64_to_ns(&ts); cycles_now = mlx5_read_internal_timer(mdev); - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); nsec_delta = ns - nsec_now; cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, clock->cycles.mult); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); time_stamp = cycles_now + cycles_delta; field_select = MLX5_MTPPS_FS_PIN_MODE | MLX5_MTPPS_FS_PATTERN | @@ -471,14 +471,14 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev, ts.tv_sec += 1; ts.tv_nsec = 0; ns = timespec64_to_ns(&ts); - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); nsec_delta = ns - nsec_now; cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, clock->cycles.mult); clock->pps_info.start[pin] = cycles_now + cycles_delta; schedule_work(&clock->pps_info.out_work); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); break; default: mlx5_core_err(mdev, " Unhandled event\n"); @@ -498,7 +498,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); return; } - rwlock_init(&clock->lock); + seqlock_init(&clock->lock); clock->cycles.read = read_internal_timer; clock->cycles.shift = MLX5_CYCLES_SHIFT; clock->cycles.mult = clocksource_khz2mult(dev_freq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index 02e2e4575e4f..263cb6e2aeee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -46,11 +46,13 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, u64 timestamp) { + unsigned int seq; u64 nsec; - read_lock(&clock->lock); - nsec = timecounter_cyc2time(&clock->tc, timestamp); - read_unlock(&clock->lock); + do { + seq = read_seqbegin(&clock->lock); + nsec = timecounter_cyc2time(&clock->tc, timestamp); + } while (read_seqretry(&clock->lock, seq)); return ns_to_ktime(nsec); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b5e9f664fc66..28132c7dc05f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1594,12 +1594,17 @@ static const struct pci_error_handlers mlx5_err_handler = { static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) { - int ret; + bool fast_teardown = false, force_teardown = false; + int ret = 1; + + fast_teardown = MLX5_CAP_GEN(dev, fast_teardown); + force_teardown = MLX5_CAP_GEN(dev, force_teardown); + + mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown); + mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown); - if (!MLX5_CAP_GEN(dev, force_teardown)) { - mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n"); + if (!fast_teardown && !force_teardown) return -EOPNOTSUPP; - } 
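/* Editorial aside (illustrative sketch, not part of the patch): the
 * lib/clock.{c,h} hunks above replace the clock rwlock with a seqlock so that
 * timestamp readers never block the PTP writers; a reader simply retries if a
 * writer ran concurrently. A minimal version of that reader/writer pairing,
 * with hypothetical names (demo_lock, demo_ns):
 */
#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(demo_lock);
static u64 demo_ns;

static u64 demo_read(void)
{
        unsigned int seq;
        u64 ns;

        do {
                seq = read_seqbegin(&demo_lock);   /* snapshot write sequence */
                ns = demo_ns;                      /* read protected state */
        } while (read_seqretry(&demo_lock, seq));  /* retry if a writer ran */

        return ns;
}

static void demo_write(u64 ns)
{
        unsigned long flags;

        write_seqlock_irqsave(&demo_lock, flags);
        demo_ns = ns;
        write_sequnlock_irqrestore(&demo_lock, flags);
}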
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { mlx5_core_dbg(dev, "Device in internal error state, giving up\n"); @@ -1612,13 +1617,19 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) mlx5_drain_health_wq(dev); mlx5_stop_health_poll(dev, false); + ret = mlx5_cmd_fast_teardown_hca(dev); + if (!ret) + goto succeed; + ret = mlx5_cmd_force_teardown_hca(dev); - if (ret) { - mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret); - mlx5_start_health_poll(dev); - return ret; - } + if (!ret) + goto succeed; + + mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret); + mlx5_start_health_poll(dev); + return ret; +succeed: mlx5_enter_error_state(dev, true); /* Some platforms requiring freeing the IRQ's in the shutdown diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index b4134fa0bba3..0594d0961cb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -39,6 +39,7 @@ #include <linux/if_link.h> #include <linux/firmware.h> #include <linux/mlx5/cq.h> +#include <linux/mlx5/fs.h> #define DRIVER_NAME "mlx5_core" #define DRIVER_VERSION "5.0-0" @@ -95,6 +96,8 @@ int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); +int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev); + void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); void mlx5_core_page_fault(struct mlx5_core_dev *dev, @@ -169,17 +172,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); int mlx5_dev_list_trylock(void); -int mlx5_encap_alloc(struct mlx5_core_dev *dev, - int header_type, - size_t size, - void *encap_header, - u32 *encap_id); -void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id); - -int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, - u8 namespace, u8 num_actions, - void *modify_actions, u32 *modify_header_id); -void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id); bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); @@ -214,4 +206,14 @@ int mlx5_lag_allow(struct mlx5_core_dev *dev); int mlx5_lag_forbid(struct mlx5_core_dev *dev); void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); + +enum { + MLX5_NIC_IFC_FULL = 0, + MLX5_NIC_IFC_DISABLED = 1, + MLX5_NIC_IFC_NO_DRAM_NIC = 2, + MLX5_NIC_IFC_INVALID = 3 +}; + +u8 mlx5_get_nic_state(struct mlx5_core_dev *dev); +void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state); #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 4ca07bfb6b14..91b8139a388d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -211,6 +211,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev, } qp->qpn = MLX5_GET(create_dct_out, out, dctn); + qp->uid = MLX5_GET(create_dct_in, in, uid); err = create_resource_common(dev, qp, MLX5_RES_DCT); if (err) goto err_cmd; @@ -219,6 +220,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev, err_cmd: MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT); MLX5_SET(destroy_dct_in, din, dctn, qp->qpn); + MLX5_SET(destroy_dct_in, 
din, uid, qp->uid); mlx5_cmd_exec(dev, (void *)&in, sizeof(din), (void *)&out, sizeof(dout)); return err; @@ -240,6 +242,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, if (err) return err; + qp->uid = MLX5_GET(create_qp_in, in, uid); qp->qpn = MLX5_GET(create_qp_out, out, qpn); mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); @@ -261,6 +264,7 @@ err_cmd: memset(dout, 0, sizeof(dout)); MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, din, qpn, qp->qpn); + MLX5_SET(destroy_qp_in, din, uid, qp->uid); mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); return err; } @@ -275,6 +279,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev, MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT); MLX5_SET(drain_dct_in, in, dctn, qp->qpn); + MLX5_SET(drain_dct_in, in, uid, qp->uid); return mlx5_cmd_exec(dev, (void *)&in, sizeof(in), (void *)&out, sizeof(out)); } @@ -301,6 +306,7 @@ destroy: destroy_resource_common(dev, &dct->mqp); MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT); MLX5_SET(destroy_dct_in, in, dctn, qp->qpn); + MLX5_SET(destroy_dct_in, in, uid, qp->uid); err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in), (void *)&out, sizeof(out)); return err; @@ -320,6 +326,7 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); + MLX5_SET(destroy_qp_in, in, uid, qp->uid); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; @@ -373,7 +380,7 @@ static void mbox_free(struct mbox_info *mbox) static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, u32 opt_param_mask, void *qpc, - struct mbox_info *mbox) + struct mbox_info *mbox, u16 uid) { mbox->out = NULL; mbox->in = NULL; @@ -381,26 +388,32 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, #define MBOX_ALLOC(mbox, typ) \ mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out)) -#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \ - MLX5_SET(typ##_in, in, opcode, _opcode); \ - MLX5_SET(typ##_in, in, qpn, _qpn) - -#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \ - MOD_QP_IN_SET(typ, in, _opcode, _qpn); \ - MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \ - memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc)) +#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \ + do { \ + MLX5_SET(typ##_in, in, opcode, _opcode); \ + MLX5_SET(typ##_in, in, qpn, _qpn); \ + MLX5_SET(typ##_in, in, uid, _uid); \ + } while (0) + +#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \ + do { \ + MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \ + MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \ + memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \ + MLX5_ST_SZ_BYTES(qpc)); \ + } while (0) switch (opcode) { /* 2RST & 2ERR */ case MLX5_CMD_OP_2RST_QP: if (MBOX_ALLOC(mbox, qp_2rst)) return -ENOMEM; - MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn); + MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid); break; case MLX5_CMD_OP_2ERR_QP: if (MBOX_ALLOC(mbox, qp_2err)) return -ENOMEM; - MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn); + MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid); break; /* MODIFY with QPC */ @@ -408,37 +421,37 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, if (MBOX_ALLOC(mbox, rst2init_qp)) return -ENOMEM; MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); + opt_param_mask, qpc, uid); break; 
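/* Editorial aside (illustrative sketch, not part of the patch): the
 * MOD_QP_IN_SET / MOD_QP_IN_SET_QPC macros above were wrapped in
 * do { ... } while (0) when they grew an extra uid statement. The wrapper
 * makes a multi-statement macro expand to exactly one statement, so it stays
 * safe under an un-braced if/else (a plain { ... } block would leave a stray
 * semicolon before the else and fail to compile). Hypothetical example:
 */
#include <linux/types.h>

struct demo_hdr {
        int opcode;
        int uid;
};

#define DEMO_HDR_SET(hdr, _opcode, _uid)        \
        do {                                    \
                (hdr)->opcode = (_opcode);      \
                (hdr)->uid = (_uid);            \
        } while (0)

static void demo_fill(struct demo_hdr *hdr, bool privileged)
{
        if (privileged)
                DEMO_HDR_SET(hdr, 1, 0);        /* expands to one statement */
        else
                DEMO_HDR_SET(hdr, 1, 42);       /* else still binds correctly */
}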
case MLX5_CMD_OP_INIT2RTR_QP: if (MBOX_ALLOC(mbox, init2rtr_qp)) return -ENOMEM; MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); + opt_param_mask, qpc, uid); break; case MLX5_CMD_OP_RTR2RTS_QP: if (MBOX_ALLOC(mbox, rtr2rts_qp)) return -ENOMEM; MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); + opt_param_mask, qpc, uid); break; case MLX5_CMD_OP_RTS2RTS_QP: if (MBOX_ALLOC(mbox, rts2rts_qp)) return -ENOMEM; MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); + opt_param_mask, qpc, uid); break; case MLX5_CMD_OP_SQERR2RTS_QP: if (MBOX_ALLOC(mbox, sqerr2rts_qp)) return -ENOMEM; MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); + opt_param_mask, qpc, uid); break; case MLX5_CMD_OP_INIT2INIT_QP: if (MBOX_ALLOC(mbox, init2init_qp)) return -ENOMEM; MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); + opt_param_mask, qpc, uid); break; default: mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n", @@ -456,7 +469,7 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, int err; err = modify_qp_mbox_alloc(dev, opcode, qp->qpn, - opt_param_mask, qpc, &mbox); + opt_param_mask, qpc, &mbox, qp->uid); if (err) return err; @@ -531,6 +544,17 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) } EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); +static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {}; + u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {}; + + MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); + MLX5_SET(destroy_rq_in, in, rqn, rqn); + MLX5_SET(destroy_rq_in, in, uid, uid); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *rq) { @@ -541,6 +565,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, if (err) return err; + rq->uid = MLX5_GET(create_rq_in, in, uid); rq->qpn = rqn; err = create_resource_common(dev, rq, MLX5_RES_RQ); if (err) @@ -549,7 +574,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, return 0; err_destroy_rq: - mlx5_core_destroy_rq(dev, rq->qpn); + destroy_rq_tracked(dev, rq->qpn, rq->uid); return err; } @@ -559,10 +584,21 @@ void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, struct mlx5_core_qp *rq) { destroy_resource_common(dev, rq); - mlx5_core_destroy_rq(dev, rq->qpn); + destroy_rq_tracked(dev, rq->qpn, rq->uid); } EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked); +static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid) +{ + u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {}; + u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {}; + + MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ); + MLX5_SET(destroy_sq_in, in, sqn, sqn); + MLX5_SET(destroy_sq_in, in, uid, uid); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *sq) { @@ -573,6 +609,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, if (err) return err; + sq->uid = MLX5_GET(create_sq_in, in, uid); sq->qpn = sqn; err = create_resource_common(dev, sq, MLX5_RES_SQ); if (err) @@ -581,7 +618,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, return 0; err_destroy_sq: - mlx5_core_destroy_sq(dev, sq->qpn); + 
destroy_sq_tracked(dev, sq->qpn, sq->uid); return err; } @@ -591,7 +628,7 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, struct mlx5_core_qp *sq) { destroy_resource_common(dev, sq); - mlx5_core_destroy_sq(dev, sq->qpn); + destroy_sq_tracked(dev, sq->qpn, sq->uid); } EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index 23cc337a96c9..6a6fc9be01e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -73,7 +73,7 @@ static int get_pas_size(struct mlx5_srq_attr *in) u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); u32 page_size = 1 << log_page_size; u32 rq_sz_po = rq_sz + (page_offset * po_quanta); - u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size; + u32 rq_num_pas = DIV_ROUND_UP(rq_sz_po, page_size); return rq_num_pas * sizeof(u64); } @@ -166,6 +166,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, if (!create_in) return -ENOMEM; + MLX5_SET(create_srq_in, create_in, uid, in->uid); srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry); pas = MLX5_ADDR_OF(create_srq_in, create_in, pas); @@ -178,8 +179,10 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, err = mlx5_cmd_exec(dev, create_in, inlen, create_out, sizeof(create_out)); kvfree(create_in); - if (!err) + if (!err) { srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); + srq->uid = in->uid; + } return err; } @@ -193,6 +196,7 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev, MLX5_SET(destroy_srq_in, srq_in, opcode, MLX5_CMD_OP_DESTROY_SRQ); MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn); + MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid); return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out)); @@ -208,6 +212,7 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ); MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn); MLX5_SET(arm_rq_in, srq_in, lwm, lwm); + MLX5_SET(arm_rq_in, srq_in, uid, srq->uid); return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out)); @@ -260,6 +265,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, if (!create_in) return -ENOMEM; + MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid); xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry); pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); @@ -277,6 +283,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, goto out; srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn); + srq->uid = in->uid; out: kvfree(create_in); return err; @@ -291,6 +298,7 @@ static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev, MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ); MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid); return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out, sizeof(xrcsrq_out)); @@ -306,6 +314,7 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid); return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out, sizeof(xrcsrq_out)); @@ -365,10 +374,13 @@ static 
int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, wq = MLX5_ADDR_OF(rmpc, rmpc, wq); MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + MLX5_SET(create_rmp_in, create_in, uid, in->uid); set_wq(wq, in); memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size); err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn); + if (!err) + srq->uid = in->uid; kvfree(create_in); return err; @@ -377,7 +389,13 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, static int destroy_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) { - return mlx5_core_destroy_rmp(dev, srq->srqn); + u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {}; + u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {}; + + MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP); + MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn); + MLX5_SET(destroy_rmp_in, in, uid, srq->uid); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static int arm_rmp_cmd(struct mlx5_core_dev *dev, @@ -400,6 +418,7 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev, MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY); MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn); + MLX5_SET(modify_rmp_in, in, uid, srq->uid); MLX5_SET(wq, wq, lwm, lwm); MLX5_SET(rmp_bitmask, bitmask, lwm, 1); MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); @@ -469,11 +488,14 @@ static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, MLX5_SET(xrqc, xrqc, user_index, in->user_index); MLX5_SET(xrqc, xrqc, cqn, in->cqn); MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ); + MLX5_SET(create_xrq_in, create_in, uid, in->uid); err = mlx5_cmd_exec(dev, create_in, inlen, create_out, sizeof(create_out)); kvfree(create_in); - if (!err) + if (!err) { srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn); + srq->uid = in->uid; + } return err; } @@ -485,6 +507,7 @@ static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ); MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn); + MLX5_SET(destroy_xrq_in, in, uid, srq->uid); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } @@ -500,6 +523,7 @@ static int arm_xrq_cmd(struct mlx5_core_dev *dev, MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ); MLX5_SET(arm_rq_in, in, srq_number, srq->srqn); MLX5_SET(arm_rq_in, in, lwm, lwm); + MLX5_SET(arm_rq_in, in, uid, srq->uid); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index b02af317c125..cfbea66b4879 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1201,3 +1201,12 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev) return err; } EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport); + +u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev) +{ + if (!mdev->sys_image_guid) + mlx5_query_nic_vport_system_image_guid(mdev, &mdev->sys_image_guid); + + return mdev->sys_image_guid; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 68e7f8df2a6d..2dcbf1ebfd6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) return (u32)wq->fbc.sz_m1 + 
1; } -u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) -{ - return wq->fbc.frag_sz_m1 + 1; -} - u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) { return wq->fbc.sz_m1 + 1; @@ -54,54 +49,37 @@ u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) return (u32)wq->fbc.sz_m1 + 1; } -static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq) -{ - return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride; -} - -static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq) -{ - return mlx5_wq_cyc_get_byte_size(&wq->rq) + - mlx5_wq_cyc_get_byte_size(&wq->sq); -} - -static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) +static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) { - return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride; -} - -static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq) -{ - return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride; + return ((u32)1 << log_sz) << log_stride; } int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl) { + u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride); + u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz); struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; int err; - mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride), - MLX5_GET(wq, wqc, log_wq_sz), - fbc); - wq->sz = wq->fbc.sz_m1 + 1; - err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err); return err; } - err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), + wq->db = wq_ctrl->db.db; + + err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - fbc->frag_buf = wq_ctrl->buf; - wq->db = wq_ctrl->db.db; + mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); + wq->sz = mlx5_wq_cyc_get_size(wq); wq_ctrl->mdev = mdev; @@ -113,46 +91,19 @@ err_db_free: return err; } -static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf, - struct mlx5_wq_qp *qp) -{ - struct mlx5_frag_buf_ctrl *sq_fbc; - struct mlx5_frag_buf *rqb, *sqb; - - rqb = &qp->rq.fbc.frag_buf; - *rqb = *buf; - rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); - rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE); - - sq_fbc = &qp->sq.fbc; - sqb = &sq_fbc->frag_buf; - *sqb = *buf; - sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq); - sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE); - sqb->frags += rqb->npages; /* first part is for the rq */ - if (sq_fbc->strides_offset) - sqb->frags--; -} - int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) { - u16 sq_strides_offset; - u32 rq_pg_remainder; - int err; + u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4; + u8 log_rq_sz = MLX5_GET(qpc, qpc, log_rq_size); + u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB); + u8 log_sq_sz = MLX5_GET(qpc, qpc, log_sq_size); - mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, - MLX5_GET(qpc, qpc, log_rq_size), - &wq->rq.fbc); + u32 rq_byte_size; + int err; - rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE; - sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB; - mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), - MLX5_GET(qpc, qpc, log_sq_size), - sq_strides_offset, - &wq->sq.fbc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -160,14 +111,32 @@ int mlx5_wq_qp_create(struct mlx5_core_dev 
*mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq), + err = mlx5_frag_buf_alloc_node(mdev, + wq_get_byte_sz(log_rq_sz, log_rq_stride) + + wq_get_byte_sz(log_sq_sz, log_sq_stride), &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq); + mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc); + + rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride); + + if (rq_byte_size < PAGE_SIZE) { + /* SQ starts within the same page of the RQ */ + u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB; + + mlx5_init_fbc_offset(wq_ctrl->buf.frags, + log_sq_stride, log_sq_sz, sq_strides_offset, + &wq->sq.fbc); + } else { + u16 rq_npages = rq_byte_size >> PAGE_SHIFT; + + mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages, + log_sq_stride, log_sq_sz, &wq->sq.fbc); + } wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR]; @@ -186,17 +155,19 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, struct mlx5_wq_ctrl *wq_ctrl) { + u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6; + u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size); int err; - mlx5_core_init_cq_frag_buf(&wq->fbc, cqc); - err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err); return err; } - err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), + wq->db = wq_ctrl->db.db; + + err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), &wq_ctrl->buf, param->buf_numa_node); if (err) { @@ -205,8 +176,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->fbc.frag_buf = wq_ctrl->buf; - wq->db = wq_ctrl->db.db; + mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc); wq_ctrl->mdev = mdev; @@ -222,30 +192,29 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl) { + u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride); + u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz); struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; struct mlx5_wqe_srq_next_seg *next_seg; int err; int i; - mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride), - MLX5_GET(wq, wqc, log_wq_sz), - fbc); - err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err); return err; } - err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq), + wq->db = wq_ctrl->db.db; + + err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->fbc.frag_buf = wq_ctrl->buf; - wq->db = wq_ctrl->db.db; + mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); for (i = 0; i < fbc->sz_m1; i++) { next_seg = mlx5_wq_ll_get_wqe(wq, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 3a1a170bb2d7..b1293d153a58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc 
*wq, struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); -u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, @@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) return ctr & wq->fbc.sz_m1; } -static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr) -{ - return ctr & wq->fbc.frag_sz_m1; -} - static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq) { return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr); @@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } +static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix) +{ + return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1; +} + static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) { int equal = (cc1 == cc2); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 68fa44a41485..1f77e97e2d7a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -27,7 +27,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_acl_flex_keys.o \ spectrum1_mr_tcam.o spectrum2_mr_tcam.o \ spectrum_mr_tcam.o spectrum_mr.o \ - spectrum_qdisc.o spectrum_span.o + spectrum_qdisc.o spectrum_span.o \ + spectrum_nve.o spectrum_nve_vxlan.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 81533d7f395c..937d0ace699a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, err_driver_init: mlxsw_thermal_fini(mlxsw_core->thermal); err_thermal_init: + mlxsw_hwmon_fini(mlxsw_core->hwmon); err_hwmon_init: if (!reload) devlink_unregister(devlink); @@ -1088,6 +1089,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, if (mlxsw_core->driver->fini) mlxsw_core->driver->fini(mlxsw_core); mlxsw_thermal_fini(mlxsw_core->thermal); + mlxsw_hwmon_fini(mlxsw_core->hwmon); if (!reload) devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 655ddd204ab2..c35be477856f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, return 0; } +static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon) +{ +} + #endif struct mlxsw_thermal; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index f6cf2896d337..e04e8162aa14 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, struct device *hwmon_dev; int err; - mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon), - GFP_KERNEL); + mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL); if (!mlxsw_hwmon) return -ENOMEM; mlxsw_hwmon->core = mlxsw_core; @@ -321,10 
+320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; - hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev, - "mlxsw", - mlxsw_hwmon, - mlxsw_hwmon->groups); + hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev, + "mlxsw", mlxsw_hwmon, + mlxsw_hwmon->groups); if (IS_ERR(hwmon_dev)) { err = PTR_ERR(hwmon_dev); goto err_hwmon_register; @@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, err_hwmon_register: err_fans_init: err_temp_init: + kfree(mlxsw_hwmon); return err; } + +void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon) +{ + hwmon_device_unregister(mlxsw_hwmon->hwmon_dev); + kfree(mlxsw_hwmon); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 83f452b7ccbb..bb99f6d41fe0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -221,7 +221,7 @@ MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8); MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8); /* pci_eqe_cqn - * Completion Queue that triggeret this EQE. + * Completion Queue that triggered this EQE. */ MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6e8b619b769b..32cb6718bb17 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -295,6 +295,7 @@ enum mlxsw_reg_sfd_rec_type { MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1, MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2, + MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL = 0xC, }; /* reg_sfd_rec_type @@ -525,6 +526,61 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index, mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid); } +/* reg_sfd_uc_tunnel_uip_msb + * When protocol is IPv4, the most significant byte of the underlay IPv4 + * destination IP. + * When protocol is IPv6, reserved. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_msb, MLXSW_REG_SFD_BASE_LEN, 24, + 8, MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_tunnel_fid + * Filtering ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_fid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +enum mlxsw_reg_sfd_uc_tunnel_protocol { + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4, + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6, +}; + +/* reg_sfd_uc_tunnel_protocol + * IP protocol. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_protocol, MLXSW_REG_SFD_BASE_LEN, 27, + 1, MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +/* reg_sfd_uc_tunnel_uip_lsb + * When protocol is IPv4, the least significant bytes of the underlay + * IPv4 destination IP. + * When protocol is IPv6, pointer to the underlay IPv6 destination IP + * which is configured by RIPS. 
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_lsb, MLXSW_REG_SFD_BASE_LEN, 0, + 24, MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid, + enum mlxsw_reg_sfd_rec_action action, u32 uip, + enum mlxsw_reg_sfd_uc_tunnel_protocol proto) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac, + action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24); + mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip); + mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid); + mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto); +} + /* SFN - Switch FDB Notification Register * ------------------------------------------- * The switch provides notifications on newly learned FDB entries and @@ -1069,6 +1125,8 @@ enum mlxsw_reg_sfdf_flush_type { MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID, MLXSW_REG_SFDF_FLUSH_PER_LAG, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID, + MLXSW_REG_SFDF_FLUSH_PER_NVE, + MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID, }; /* reg_sfdf_flush_type @@ -1079,6 +1137,10 @@ enum mlxsw_reg_sfdf_flush_type { * 3 - All FID dynamic entries pointing to port are flushed. * 4 - All dynamic entries pointing to LAG are flushed. * 5 - All FID dynamic entries pointing to LAG are flushed. + * 6 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are + * flushed. + * 7 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are + * flushed, per FID. * Access: RW */ MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4); @@ -1315,12 +1377,19 @@ MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4); */ MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20); -static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash) +/* reg_slcr_seed + * LAG seed value. The seed is the same for all ports. + * Access: RW + */ +MLXSW_ITEM32(reg, slcr, seed, 0x08, 0, 32); + +static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash, u32 seed) { MLXSW_REG_ZERO(slcr, payload); mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL); mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC); mlxsw_reg_slcr_lag_hash_set(payload, lag_hash); + mlxsw_reg_slcr_seed_set(payload, seed); } /* SLCOR - Switch LAG Collector Register @@ -8279,6 +8348,508 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, mlxsw_reg_mgpc_opcode_set(payload, opcode); } +/* MPRS - Monitoring Parsing State Register + * ---------------------------------------- + * The MPRS register is used for setting up the parsing for hash, + * policy-engine and routing. + */ +#define MLXSW_REG_MPRS_ID 0x9083 +#define MLXSW_REG_MPRS_LEN 0x14 + +MLXSW_REG_DEFINE(mprs, MLXSW_REG_MPRS_ID, MLXSW_REG_MPRS_LEN); + +/* reg_mprs_parsing_depth + * Minimum parsing depth. + * Need to enlarge parsing depth according to L3, MPLS, tunnels, ACL + * rules, traps, hash, etc. Default is 96 bytes. Reserved when SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, mprs, parsing_depth, 0x00, 0, 16); + +/* reg_mprs_parsing_en + * Parsing enable. + * Bit 0 - Enable parsing of NVE of types VxLAN, VxLAN-GPE, GENEVE and + * NVGRE. Default is enabled. Reserved when SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, mprs, parsing_en, 0x04, 0, 16); + +/* reg_mprs_vxlan_udp_dport + * VxLAN UDP destination port. + * Used for identifying VxLAN packets and for dport field in + * encapsulation. Default is 4789. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, mprs, vxlan_udp_dport, 0x10, 0, 16); + +static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth, + u16 vxlan_udp_dport) +{ + MLXSW_REG_ZERO(mprs, payload); + mlxsw_reg_mprs_parsing_depth_set(payload, parsing_depth); + mlxsw_reg_mprs_parsing_en_set(payload, true); + mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport); +} + +/* TNGCR - Tunneling NVE General Configuration Register + * ---------------------------------------------------- + * The TNGCR register is used for setting up the NVE Tunneling configuration. + */ +#define MLXSW_REG_TNGCR_ID 0xA001 +#define MLXSW_REG_TNGCR_LEN 0x44 + +MLXSW_REG_DEFINE(tngcr, MLXSW_REG_TNGCR_ID, MLXSW_REG_TNGCR_LEN); + +enum mlxsw_reg_tngcr_type { + MLXSW_REG_TNGCR_TYPE_VXLAN, + MLXSW_REG_TNGCR_TYPE_VXLAN_GPE, + MLXSW_REG_TNGCR_TYPE_GENEVE, + MLXSW_REG_TNGCR_TYPE_NVGRE, +}; + +/* reg_tngcr_type + * Tunnel type for encapsulation and decapsulation. The types are mutually + * exclusive. + * Note: For Spectrum the NVE parsing must be enabled in MPRS. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, type, 0x00, 0, 4); + +/* reg_tngcr_nve_valid + * The VTEP is valid. Allows adding FDB entries for tunnel encapsulation. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_valid, 0x04, 31, 1); + +/* reg_tngcr_nve_ttl_uc + * The TTL for NVE tunnel encapsulation underlay unicast packets. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_ttl_uc, 0x04, 0, 8); + +/* reg_tngcr_nve_ttl_mc + * The TTL for NVE tunnel encapsulation underlay multicast packets. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_ttl_mc, 0x08, 0, 8); + +enum { + /* Do not copy flow label. Calculate flow label using nve_flh. */ + MLXSW_REG_TNGCR_FL_NO_COPY, + /* Copy flow label from inner packet if packet is IPv6 and + * encapsulation is by IPv6. Otherwise, calculate flow label using + * nve_flh. + */ + MLXSW_REG_TNGCR_FL_COPY, +}; + +/* reg_tngcr_nve_flc + * For NVE tunnel encapsulation: Flow label copy from inner packet. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_flc, 0x0C, 25, 1); + +enum { + /* Flow label is static. In Spectrum this means '0'. Spectrum-2 + * uses {nve_fl_prefix, nve_fl_suffix}. + */ + MLXSW_REG_TNGCR_FL_NO_HASH, + /* 8 LSBs of the flow label are calculated from ECMP hash of the + * inner packet. 12 MSBs are configured by nve_fl_prefix. + */ + MLXSW_REG_TNGCR_FL_HASH, +}; + +/* reg_tngcr_nve_flh + * NVE flow label hash. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_flh, 0x0C, 24, 1); + +/* reg_tngcr_nve_fl_prefix + * NVE flow label prefix. Constant 12 MSBs of the flow label. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_fl_prefix, 0x0C, 8, 12); + +/* reg_tngcr_nve_fl_suffix + * NVE flow label suffix. Constant 8 LSBs of the flow label. + * Reserved when nve_flh=1 and for Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_fl_suffix, 0x0C, 0, 8); + +enum { + /* Source UDP port is fixed (default '0') */ + MLXSW_REG_TNGCR_UDP_SPORT_NO_HASH, + /* Source UDP port is calculated based on hash */ + MLXSW_REG_TNGCR_UDP_SPORT_HASH, +}; + +/* reg_tngcr_nve_udp_sport_type + * NVE UDP source port type. + * Spectrum uses LAG hash (SLCRv2). Spectrum-2 uses ECMP hash (RECRv2). + * When the source UDP port is calculated based on hash, then the 8 LSBs + * are calculated from hash the 8 MSBs are configured by + * nve_udp_sport_prefix. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_udp_sport_type, 0x10, 24, 1); + +/* reg_tngcr_nve_udp_sport_prefix + * NVE UDP source port prefix. 
Constant 8 MSBs of the UDP source port. + * Reserved when NVE type is NVGRE. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_udp_sport_prefix, 0x10, 8, 8); + +/* reg_tngcr_nve_group_size_mc + * The amount of sequential linked lists of MC entries. The first linked + * list is configured by SFD.underlay_mc_ptr. + * Valid values: 1, 2, 4, 8, 16, 32, 64 + * The linked list are configured by TNUMT. + * The hash is set by LAG hash. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_group_size_mc, 0x18, 0, 8); + +/* reg_tngcr_nve_group_size_flood + * The amount of sequential linked lists of flooding entries. The first + * linked list is configured by SFMR.nve_tunnel_flood_ptr + * Valid values: 1, 2, 4, 8, 16, 32, 64 + * The linked list are configured by TNUMT. + * The hash is set by LAG hash. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_group_size_flood, 0x1C, 0, 8); + +/* reg_tngcr_learn_enable + * During decapsulation, whether to learn from NVE port. + * Reserved when Spectrum-2. See TNPC. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, learn_enable, 0x20, 31, 1); + +/* reg_tngcr_underlay_virtual_router + * Underlay virtual router. + * Reserved when Spectrum-2. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, underlay_virtual_router, 0x20, 0, 16); + +/* reg_tngcr_underlay_rif + * Underlay ingress router interface. RIF type should be loopback generic. + * Reserved when Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, underlay_rif, 0x24, 0, 16); + +/* reg_tngcr_usipv4 + * Underlay source IPv4 address of the NVE. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, usipv4, 0x28, 0, 32); + +/* reg_tngcr_usipv6 + * Underlay source IPv6 address of the NVE. For Spectrum, must not be + * modified under traffic of NVE tunneling encapsulation. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, tngcr, usipv6, 0x30, 16); + +static inline void mlxsw_reg_tngcr_pack(char *payload, + enum mlxsw_reg_tngcr_type type, + bool valid, u8 ttl) +{ + MLXSW_REG_ZERO(tngcr, payload); + mlxsw_reg_tngcr_type_set(payload, type); + mlxsw_reg_tngcr_nve_valid_set(payload, valid); + mlxsw_reg_tngcr_nve_ttl_uc_set(payload, ttl); + mlxsw_reg_tngcr_nve_ttl_mc_set(payload, ttl); + mlxsw_reg_tngcr_nve_flc_set(payload, MLXSW_REG_TNGCR_FL_NO_COPY); + mlxsw_reg_tngcr_nve_flh_set(payload, 0); + mlxsw_reg_tngcr_nve_udp_sport_type_set(payload, + MLXSW_REG_TNGCR_UDP_SPORT_HASH); + mlxsw_reg_tngcr_nve_udp_sport_prefix_set(payload, 0); + mlxsw_reg_tngcr_nve_group_size_mc_set(payload, 1); + mlxsw_reg_tngcr_nve_group_size_flood_set(payload, 1); +} + +/* TNUMT - Tunneling NVE Underlay Multicast Table Register + * ------------------------------------------------------- + * The TNUMT register is for building the underlay MC table. It is used + * for MC, flooding and BC traffic into the NVE tunnel. + */ +#define MLXSW_REG_TNUMT_ID 0xA003 +#define MLXSW_REG_TNUMT_LEN 0x20 + +MLXSW_REG_DEFINE(tnumt, MLXSW_REG_TNUMT_ID, MLXSW_REG_TNUMT_LEN); + +enum mlxsw_reg_tnumt_record_type { + MLXSW_REG_TNUMT_RECORD_TYPE_IPV4, + MLXSW_REG_TNUMT_RECORD_TYPE_IPV6, + MLXSW_REG_TNUMT_RECORD_TYPE_LABEL, +}; + +/* reg_tnumt_record_type + * Record type. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4); + +enum mlxsw_reg_tnumt_tunnel_port { + MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, + MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS, + MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0, + MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1, +}; + +/* reg_tnumt_tunnel_port + * Tunnel port. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, tunnel_port, 0x00, 24, 4); + +/* reg_tnumt_underlay_mc_ptr + * Index to the underlay multicast table. + * For Spectrum the index is to the KVD linear. + * Access: Index + */ +MLXSW_ITEM32(reg, tnumt, underlay_mc_ptr, 0x00, 0, 24); + +/* reg_tnumt_vnext + * The next_underlay_mc_ptr is valid. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, vnext, 0x04, 31, 1); + +/* reg_tnumt_next_underlay_mc_ptr + * The next index to the underlay multicast table. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, next_underlay_mc_ptr, 0x04, 0, 24); + +/* reg_tnumt_record_size + * Number of IP addresses in the record. + * Range is 1..cap_max_nve_mc_entries_ipv{4,6} + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, record_size, 0x08, 0, 3); + +/* reg_tnumt_udip + * The underlay IPv4 addresses. udip[i] is reserved if i >= size + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, tnumt, udip, 0x0C, 0, 32, 0x04, 0x00, false); + +/* reg_tnumt_udip_ptr + * The pointer to the underlay IPv6 addresses. udip_ptr[i] is reserved if + * i >= size. The IPv6 addresses are configured by RIPS. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false); + +static inline void mlxsw_reg_tnumt_pack(char *payload, + enum mlxsw_reg_tnumt_record_type type, + enum mlxsw_reg_tnumt_tunnel_port tport, + u32 underlay_mc_ptr, bool vnext, + u32 next_underlay_mc_ptr, + u8 record_size) +{ + MLXSW_REG_ZERO(tnumt, payload); + mlxsw_reg_tnumt_record_type_set(payload, type); + mlxsw_reg_tnumt_tunnel_port_set(payload, tport); + mlxsw_reg_tnumt_underlay_mc_ptr_set(payload, underlay_mc_ptr); + mlxsw_reg_tnumt_vnext_set(payload, vnext); + mlxsw_reg_tnumt_next_underlay_mc_ptr_set(payload, next_underlay_mc_ptr); + mlxsw_reg_tnumt_record_size_set(payload, record_size); +} + +/* TNQCR - Tunneling NVE QoS Configuration Register + * ------------------------------------------------ + * The TNQCR register configures how QoS is set in encapsulation into the + * underlay network. + */ +#define MLXSW_REG_TNQCR_ID 0xA010 +#define MLXSW_REG_TNQCR_LEN 0x0C + +MLXSW_REG_DEFINE(tnqcr, MLXSW_REG_TNQCR_ID, MLXSW_REG_TNQCR_LEN); + +/* reg_tnqcr_enc_set_dscp + * For encapsulation: How to set DSCP field: + * 0 - Copy the DSCP from the overlay (inner) IP header to the underlay + * (outer) IP header. If there is no IP header, use TNQDR.dscp + * 1 - Set the DSCP field as TNQDR.dscp + * Access: RW + */ +MLXSW_ITEM32(reg, tnqcr, enc_set_dscp, 0x04, 28, 1); + +static inline void mlxsw_reg_tnqcr_pack(char *payload) +{ + MLXSW_REG_ZERO(tnqcr, payload); + mlxsw_reg_tnqcr_enc_set_dscp_set(payload, 0); +} + +/* TNQDR - Tunneling NVE QoS Default Register + * ------------------------------------------ + * The TNQDR register configures the default QoS settings for NVE + * encapsulation. + */ +#define MLXSW_REG_TNQDR_ID 0xA011 +#define MLXSW_REG_TNQDR_LEN 0x08 + +MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN); + +/* reg_tnqdr_local_port + * Local port number (receive port). CPU port is supported. + * Access: Index + */ +MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8); + +/* reg_tnqdr_dscp + * For encapsulation, the default DSCP. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6); + +static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(tnqdr, payload); + mlxsw_reg_tnqdr_local_port_set(payload, local_port); + mlxsw_reg_tnqdr_dscp_set(payload, 0); +} + +/* TNEEM - Tunneling NVE Encapsulation ECN Mapping Register + * -------------------------------------------------------- + * The TNEEM register maps ECN of the IP header at the ingress to the + * encapsulation to the ECN of the underlay network. + */ +#define MLXSW_REG_TNEEM_ID 0xA012 +#define MLXSW_REG_TNEEM_LEN 0x0C + +MLXSW_REG_DEFINE(tneem, MLXSW_REG_TNEEM_ID, MLXSW_REG_TNEEM_LEN); + +/* reg_tneem_overlay_ecn + * ECN of the IP header in the overlay network. + * Access: Index + */ +MLXSW_ITEM32(reg, tneem, overlay_ecn, 0x04, 24, 2); + +/* reg_tneem_underlay_ecn + * ECN of the IP header in the underlay network. + * Access: RW + */ +MLXSW_ITEM32(reg, tneem, underlay_ecn, 0x04, 16, 2); + +static inline void mlxsw_reg_tneem_pack(char *payload, u8 overlay_ecn, + u8 underlay_ecn) +{ + MLXSW_REG_ZERO(tneem, payload); + mlxsw_reg_tneem_overlay_ecn_set(payload, overlay_ecn); + mlxsw_reg_tneem_underlay_ecn_set(payload, underlay_ecn); +} + +/* TNDEM - Tunneling NVE Decapsulation ECN Mapping Register + * -------------------------------------------------------- + * The TNDEM register configures the actions that are done in the + * decapsulation. + */ +#define MLXSW_REG_TNDEM_ID 0xA013 +#define MLXSW_REG_TNDEM_LEN 0x0C + +MLXSW_REG_DEFINE(tndem, MLXSW_REG_TNDEM_ID, MLXSW_REG_TNDEM_LEN); + +/* reg_tndem_underlay_ecn + * ECN field of the IP header in the underlay network. + * Access: Index + */ +MLXSW_ITEM32(reg, tndem, underlay_ecn, 0x04, 24, 2); + +/* reg_tndem_overlay_ecn + * ECN field of the IP header in the overlay network. + * Access: Index + */ +MLXSW_ITEM32(reg, tndem, overlay_ecn, 0x04, 16, 2); + +/* reg_tndem_eip_ecn + * Egress IP ECN. ECN field of the IP header of the packet which goes out + * from the decapsulation. + * Access: RW + */ +MLXSW_ITEM32(reg, tndem, eip_ecn, 0x04, 8, 2); + +/* reg_tndem_trap_en + * Trap enable: + * 0 - No trap due to decap ECN + * 1 - Trap enable with trap_id + * Access: RW + */ +MLXSW_ITEM32(reg, tndem, trap_en, 0x08, 28, 4); + +/* reg_tndem_trap_id + * Trap ID. Either DECAP_ECN0 or DECAP_ECN1. + * Reserved when trap_en is '0'. + * Access: RW + */ +MLXSW_ITEM32(reg, tndem, trap_id, 0x08, 0, 9); + +static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn, + u8 overlay_ecn, u8 ecn, bool trap_en, + u16 trap_id) +{ + MLXSW_REG_ZERO(tndem, payload); + mlxsw_reg_tndem_underlay_ecn_set(payload, underlay_ecn); + mlxsw_reg_tndem_overlay_ecn_set(payload, overlay_ecn); + mlxsw_reg_tndem_eip_ecn_set(payload, ecn); + mlxsw_reg_tndem_trap_en_set(payload, trap_en); + mlxsw_reg_tndem_trap_id_set(payload, trap_id); +} + +/* TNPC - Tunnel Port Configuration Register + * ----------------------------------------- + * The TNPC register is used for tunnel port configuration. + * Reserved when Spectrum. + */ +#define MLXSW_REG_TNPC_ID 0xA020 +#define MLXSW_REG_TNPC_LEN 0x18 + +MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN); + +enum mlxsw_reg_tnpc_tunnel_port { + MLXSW_REG_TNPC_TUNNEL_PORT_NVE, + MLXSW_REG_TNPC_TUNNEL_PORT_VPLS, + MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0, + MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1, +}; + +/* reg_tnpc_tunnel_port + * Tunnel port. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, tnpc, tunnel_port, 0x00, 0, 4); + +/* reg_tnpc_learn_enable_v6 + * During IPv6 underlay decapsulation, whether to learn from tunnel port. + * Access: RW + */ +MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1); + +/* reg_tnpc_learn_enable_v4 + * During IPv4 underlay decapsulation, whether to learn from tunnel port. + * Access: RW + */ +MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1); + +static inline void mlxsw_reg_tnpc_pack(char *payload, + enum mlxsw_reg_tnpc_tunnel_port tport, + bool learn_enable) +{ + MLXSW_REG_ZERO(tnpc, payload); + mlxsw_reg_tnpc_tunnel_port_set(payload, tport); + mlxsw_reg_tnpc_learn_enable_v4_set(payload, learn_enable); + mlxsw_reg_tnpc_learn_enable_v6_set(payload, learn_enable); +} + /* TIGCR - Tunneling IPinIP General Configuration Register * ------------------------------------------------------- * The TIGCR register is used for setting up the IPinIP Tunnel configuration. @@ -8336,8 +8907,15 @@ MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2); */ MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4); +/* reg_sbpr_infi_size + * Size is infinite. + * Access: RW + */ +MLXSW_ITEM32(reg, sbpr, infi_size, 0x04, 31, 1); + /* reg_sbpr_size * Pool size in buffer cells. + * Reserved when infi_size = 1. * Access: RW */ MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24); @@ -8355,13 +8933,15 @@ MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4); static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool, enum mlxsw_reg_sbxx_dir dir, - enum mlxsw_reg_sbpr_mode mode, u32 size) + enum mlxsw_reg_sbpr_mode mode, u32 size, + bool infi_size) { MLXSW_REG_ZERO(sbpr, payload); mlxsw_reg_sbpr_pool_set(payload, pool); mlxsw_reg_sbpr_dir_set(payload, dir); mlxsw_reg_sbpr_mode_set(payload, mode); mlxsw_reg_sbpr_size_set(payload, size); + mlxsw_reg_sbpr_infi_size_set(payload, infi_size); } /* SBCM - Shared Buffer Class Management Register @@ -8409,6 +8989,12 @@ MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); #define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1 #define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14 +/* reg_sbcm_infi_max + * Max buffer is infinite. + * Access: RW + */ +MLXSW_ITEM32(reg, sbcm, infi_max, 0x1C, 31, 1); + /* reg_sbcm_max_buff * When the pool associated to the port-pg/tclass is configured to * static, Maximum buffer size for the limiter configured in cells. @@ -8418,6 +9004,7 @@ MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); * 0: 0 * i: (1/128)*2^(i-1), for i=1..14 * 0xFF: Infinity + * Reserved when infi_max = 1. 
* Access: RW */ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24); @@ -8430,7 +9017,8 @@ MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, enum mlxsw_reg_sbxx_dir dir, - u32 min_buff, u32 max_buff, u8 pool) + u32 min_buff, u32 max_buff, + bool infi_max, u8 pool) { MLXSW_REG_ZERO(sbcm, payload); mlxsw_reg_sbcm_local_port_set(payload, local_port); @@ -8438,6 +9026,7 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, mlxsw_reg_sbcm_dir_set(payload, dir); mlxsw_reg_sbcm_min_buff_set(payload, min_buff); mlxsw_reg_sbcm_max_buff_set(payload, max_buff); + mlxsw_reg_sbcm_infi_max_set(payload, infi_max); mlxsw_reg_sbcm_pool_set(payload, pool); } @@ -8810,6 +9399,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mcc), MLXSW_REG(mcda), MLXSW_REG(mgpc), + MLXSW_REG(mprs), + MLXSW_REG(tngcr), + MLXSW_REG(tnumt), + MLXSW_REG(tnqcr), + MLXSW_REG(tnqdr), + MLXSW_REG(tneem), + MLXSW_REG(tndem), + MLXSW_REG(tnpc), MLXSW_REG(tigcr), MLXSW_REG(sbpr), MLXSW_REG(sbcm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 79a31de7c825..99b341539870 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -46,6 +46,8 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_RIFS, MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES, MLXSW_RES_ID_MAX_LPM_TREES, + MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4, + MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6, /* Internal resources. * Determined by the SW, not queried from the HW. @@ -96,6 +98,8 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10, [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, + [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02, + [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03, }; struct mlxsw_res { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 30bb2c533cec..8a4983adae94 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -21,6 +21,7 @@ #include <linux/dcbnl.h> #include <linux/inetdevice.h> #include <linux/netlink.h> +#include <linux/random.h> #include <net/switchdev.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_mirred.h> @@ -331,7 +332,10 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) return -EINVAL; } if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == - MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor)) + MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && + (rev->minor > req_rev->minor || + (rev->minor == req_rev->minor && + rev->subminor >= req_rev->subminor))) return 0; dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", @@ -2804,6 +2808,13 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) MLXSW_REG_QEEC_MAS_DIS); if (err) return err; + + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; } /* Map all priorities to traffic class 0. 
*/ @@ -2983,6 +2994,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_qdiscs_init; } + err = mlxsw_sp_port_nve_init(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n", + mlxsw_sp_port->local_port); + goto err_port_nve_init; + } + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); if (IS_ERR(mlxsw_sp_port_vlan)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", @@ -3011,6 +3029,8 @@ err_register_netdev: mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); err_port_vlan_get: + mlxsw_sp_port_nve_fini(mlxsw_sp_port); +err_port_nve_init: mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); err_port_qdiscs_init: mlxsw_sp_port_fids_fini(mlxsw_sp_port); @@ -3050,6 +3070,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_vlan_flush(mlxsw_sp_port); + mlxsw_sp_port_nve_fini(mlxsw_sp_port); mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); mlxsw_sp_port_fids_fini(mlxsw_sp_port); mlxsw_sp_port_dcb_fini(mlxsw_sp_port); @@ -3459,6 +3480,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap */ @@ -3472,6 +3494,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), + /* NVE traps */ + MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) @@ -3656,8 +3680,10 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) { char slcr_pl[MLXSW_REG_SLCR_LEN]; + u32 seed; int err; + get_random_bytes(&seed, sizeof(seed)); mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | MLXSW_REG_SLCR_LAG_HASH_DMAC | MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | @@ -3666,7 +3692,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) MLXSW_REG_SLCR_LAG_HASH_DIP | MLXSW_REG_SLCR_LAG_HASH_SPORT | MLXSW_REG_SLCR_LAG_HASH_DPORT | - MLXSW_REG_SLCR_LAG_HASH_IPPROTO); + MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); if (err) return err; @@ -3779,6 +3805,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_afa_init; } + err = mlxsw_sp_nve_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); + goto err_nve_init; + } + err = mlxsw_sp_router_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); @@ -3825,6 +3857,8 @@ err_acl_init: err_netdev_notifier: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + mlxsw_sp_nve_fini(mlxsw_sp); +err_nve_init: mlxsw_sp_afa_fini(mlxsw_sp); err_afa_init: mlxsw_sp_counter_pool_fini(mlxsw_sp); @@ -3857,6 +3891,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 
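/* Editorial aside, not part of the patch above: the new NVE hooks
 * (mlxsw_sp_port_nve_init/_fini, mlxsw_sp_nve_init/_fini) slot into the
 * driver's usual goto-unwind pattern: every added init step gets a matching
 * err_* label so a failure in any later step unwinds the earlier ones in
 * reverse order, and the fini path mirrors that order. A minimal standalone
 * sketch of the idiom, with a hypothetical struct example and step_a/b/c
 * helpers:
 */

struct example { int dummy; };

static int step_a_init(struct example *ex) { return 0; }
static void step_a_fini(struct example *ex) { }
static int step_b_init(struct example *ex) { return 0; }
static void step_b_fini(struct example *ex) { }
static int step_c_init(struct example *ex) { return 0; }
static void step_c_fini(struct example *ex) { }

static int example_init(struct example *ex)
{
        int err;

        err = step_a_init(ex);
        if (err)
                return err;

        err = step_b_init(ex);
        if (err)
                goto err_step_b_init;

        err = step_c_init(ex);
        if (err)
                goto err_step_c_init;

        return 0;

err_step_c_init:
        step_b_fini(ex);
err_step_b_init:
        step_a_fini(ex);
        return err;
}

static void example_fini(struct example *ex)
{
        /* Teardown in reverse order of example_init() */
        step_c_fini(ex);
        step_b_fini(ex);
        step_a_fini(ex);
}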
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; + mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); } @@ -3871,6 +3906,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); } @@ -3884,6 +3920,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_acl_fini(mlxsw_sp); unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); mlxsw_sp_router_fini(mlxsw_sp); + mlxsw_sp_nve_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); @@ -4550,6 +4587,41 @@ static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); } +static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) +{ + unsigned int num_vxlans = 0; + struct net_device *dev; + struct list_head *iter; + + netdev_for_each_lower_dev(br_dev, dev, iter) { + if (netif_is_vxlan(dev)) + num_vxlans++; + } + + return num_vxlans > 1; +} + +static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, + struct netlink_ext_ack *extack) +{ + if (br_multicast_enabled(br_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); + return false; + } + + if (br_vlan_enabled(br_dev)) { + NL_SET_ERR_MSG_MOD(extack, "VLAN filtering can not be enabled on a bridge with a VxLAN device"); + return false; + } + + if (mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); + return false; + } + + return true; +} + static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, struct net_device *dev, unsigned long event, void *ptr) @@ -4579,6 +4651,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, } if (!info->linking) break; + if (netif_is_bridge_master(upper_dev) && + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && + mlxsw_sp_bridge_has_vxlan(upper_dev) && + !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) + return -EOPNOTSUPP; if (netdev_has_any_upper_dev(upper_dev) && (!netif_is_bridge_master(upper_dev) || !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, @@ -4736,6 +4813,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, } if (!info->linking) break; + if (netif_is_bridge_master(upper_dev) && + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && + mlxsw_sp_bridge_has_vxlan(upper_dev) && + !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) + return -EOPNOTSUPP; if (netdev_has_any_upper_dev(upper_dev) && (!netif_is_bridge_master(upper_dev) || !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, @@ -4882,6 +4964,63 @@ static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) return netif_is_l3_master(info->upper_dev); } +static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + unsigned long event, void *ptr) +{ + struct netdev_notifier_changeupper_info *cu_info; + struct netdev_notifier_info *info = ptr; + struct netlink_ext_ack *extack; + struct net_device *upper_dev; + + extack = netdev_notifier_info_to_extack(info); + + switch (event) { + case NETDEV_CHANGEUPPER: + cu_info = container_of(info, + struct netdev_notifier_changeupper_info, + info); + 
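/* Editorial aside, not part of the patch above: container_of() here walks
 * from the embedded netdev_notifier_info back to the enclosing
 * netdev_notifier_changeupper_info that the notifier core passed in. A
 * minimal standalone sketch of the idea, with hypothetical inner/outer types
 * and a simplified container_of_sketch() macro (the in-kernel container_of
 * adds type checking on top of this). It is only valid when the inner
 * pointer really is embedded in an object of the outer type:
 */

#include <stddef.h>

#define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner {
        int id;
};

struct outer {
        int extra;
        struct inner base;      /* embedded member, like the info field above */
};

static struct outer *outer_from_inner(struct inner *in)
{
        /* Subtracting the member's offset steps back to the start of the
         * enclosing structure.
         */
        return container_of_sketch(in, struct outer, base);
}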
upper_dev = cu_info->upper_dev; + if (!netif_is_bridge_master(upper_dev)) + return 0; + if (!mlxsw_sp_lower_get(upper_dev)) + return 0; + if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) + return -EOPNOTSUPP; + if (cu_info->linking) { + if (!netif_running(dev)) + return 0; + return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, + dev, extack); + } else { + mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev); + } + break; + case NETDEV_PRE_UP: + upper_dev = netdev_master_upper_dev_get(dev); + if (!upper_dev) + return 0; + if (!netif_is_bridge_master(upper_dev)) + return 0; + if (!mlxsw_sp_lower_get(upper_dev)) + return 0; + return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, + extack); + case NETDEV_DOWN: + upper_dev = netdev_master_upper_dev_get(dev); + if (!upper_dev) + return 0; + if (!netif_is_bridge_master(upper_dev)) + return 0; + if (!mlxsw_sp_lower_get(upper_dev)) + return 0; + mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev); + break; + } + + return 0; +} + static int mlxsw_sp_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -4898,6 +5037,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, } mlxsw_sp_span_respin(mlxsw_sp); + if (netif_is_vxlan(dev)) + err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, event, ptr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3cdb7aca90b7..0875a79cbe7b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -16,6 +16,7 @@ #include <net/psample.h> #include <net/pkt_cls.h> #include <net/red.h> +#include <net/vxlan.h> #include "port.h" #include "core.h" @@ -55,6 +56,8 @@ enum mlxsw_sp_resource_id { struct mlxsw_sp_port; struct mlxsw_sp_rif; struct mlxsw_sp_span_entry; +enum mlxsw_sp_l3proto; +union mlxsw_sp_l3addr; struct mlxsw_sp_upper { struct net_device *dev; @@ -113,9 +116,11 @@ struct mlxsw_sp_acl; struct mlxsw_sp_counter_pool; struct mlxsw_sp_fid_core; struct mlxsw_sp_kvdl; +struct mlxsw_sp_nve; struct mlxsw_sp_kvdl_ops; struct mlxsw_sp_mr_tcam_ops; struct mlxsw_sp_acl_tcam_ops; +struct mlxsw_sp_nve_ops; struct mlxsw_sp { struct mlxsw_sp_port **ports; @@ -132,6 +137,7 @@ struct mlxsw_sp { struct mlxsw_sp_acl *acl; struct mlxsw_sp_fid_core *fid_core; struct mlxsw_sp_kvdl *kvdl; + struct mlxsw_sp_nve *nve; struct notifier_block netdevice_nb; struct mlxsw_sp_counter_pool *counter_pool; @@ -146,6 +152,7 @@ struct mlxsw_sp { const struct mlxsw_afk_ops *afk_ops; const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops; const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; + const struct mlxsw_sp_nve_ops **nve_ops_arr; }; static inline struct mlxsw_sp_upper * @@ -235,6 +242,25 @@ struct mlxsw_sp_port { struct mlxsw_sp_acl_block *eg_acl_block; }; +static inline struct net_device * +mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev) +{ + struct net_device *dev; + struct list_head *iter; + + netdev_for_each_lower_dev(br_dev, dev, iter) { + if (netif_is_vxlan(dev)) + return dev; + } + + return NULL; +} + +static inline bool mlxsw_sp_bridge_has_vxlan(struct net_device *br_dev) +{ + return !!mlxsw_sp_bridge_vxlan_dev_find(br_dev); +} + static inline bool mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port) { @@ -330,6 +356,13 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *br_dev); bool 
mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, const struct net_device *br_dev); +int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + const struct net_device *vxlan_dev, + struct netlink_ext_ack *extack); +void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + const struct net_device *vxlan_dev); /* spectrum.c */ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -383,6 +416,17 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) #endif /* spectrum_router.c */ +enum mlxsw_sp_l3proto { + MLXSW_SP_L3_PROTO_IPV4, + MLXSW_SP_L3_PROTO_IPV6, +#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1) +}; + +union mlxsw_sp_l3addr { + __be32 addr4; + struct in6_addr addr6; +}; + int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); @@ -416,6 +460,19 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, struct net_device *dev); +struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); +u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); +int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, + enum mlxsw_sp_l3proto ul_proto, + const union mlxsw_sp_l3addr *ul_sip, + u32 tunnel_index); +void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, + enum mlxsw_sp_l3proto ul_proto, + const union mlxsw_sp_l3addr *ul_sip); +int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id, + u16 *vr_id); /* spectrum_kvdl.c */ enum mlxsw_sp_kvdl_entry_type { @@ -423,6 +480,7 @@ enum mlxsw_sp_kvdl_entry_type { MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, }; static inline unsigned int @@ -433,6 +491,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */ default: return 1; } @@ -662,6 +721,16 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_prio_qopt_offload *p); /* spectrum_fid.c */ +struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp, + __be32 vni); +int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni); +int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid, + u32 nve_flood_index); +void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid); +bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid); +int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni); +void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid); +bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid); int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_flood_type packet_type, u8 local_port, bool member); @@ -680,6 +749,8 @@ u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid); struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid); struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp 
*mlxsw_sp, int br_ifindex); +struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp, + int br_ifindex); struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp, u16 rif_index); struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp); @@ -725,4 +796,39 @@ extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops; /* spectrum2_mr_tcam.c */ extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops; +/* spectrum_nve.c */ +enum mlxsw_sp_nve_type { + MLXSW_SP_NVE_TYPE_VXLAN, +}; + +struct mlxsw_sp_nve_params { + enum mlxsw_sp_nve_type type; + __be32 vni; + const struct net_device *dev; +}; + +extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[]; +extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[]; + +int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr); +void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr); +u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp); +bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp, + u32 tb_id, __be32 addr); +int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, + struct mlxsw_sp_nve_params *params, + struct netlink_ext_ack *extack); +void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid); +int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port); +void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port); +int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c index 68c8b148bef2..8d14770766b4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { MAX_KVD_ACTION_SETS), MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), + MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE), }; #define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 3589432d1643..12c61e0cc570 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -25,28 +25,52 @@ struct mlxsw_cp_sb_occ { struct mlxsw_sp_sb_cm { u32 min_buff; u32 max_buff; - u8 pool; + u16 pool_index; struct mlxsw_cp_sb_occ occ; }; +#define MLXSW_SP_SB_INFI -1U + struct mlxsw_sp_sb_pm { u32 min_buff; u32 max_buff; struct mlxsw_cp_sb_occ occ; }; -#define MLXSW_SP_SB_POOL_COUNT 4 -#define MLXSW_SP_SB_TC_COUNT 8 +struct mlxsw_sp_sb_pool_des { + enum mlxsw_reg_sbxx_dir dir; + u8 pool; +}; + +/* Order ingress pools before egress pools. 
*/ +static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = { + {MLXSW_REG_SBXX_DIR_INGRESS, 0}, + {MLXSW_REG_SBXX_DIR_INGRESS, 1}, + {MLXSW_REG_SBXX_DIR_INGRESS, 2}, + {MLXSW_REG_SBXX_DIR_INGRESS, 3}, + {MLXSW_REG_SBXX_DIR_EGRESS, 0}, + {MLXSW_REG_SBXX_DIR_EGRESS, 1}, + {MLXSW_REG_SBXX_DIR_EGRESS, 2}, + {MLXSW_REG_SBXX_DIR_EGRESS, 3}, + {MLXSW_REG_SBXX_DIR_EGRESS, 15}, +}; + +#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess) + +#define MLXSW_SP_SB_ING_TC_COUNT 8 +#define MLXSW_SP_SB_EG_TC_COUNT 16 struct mlxsw_sp_sb_port { - struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT]; - struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT]; + struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT]; + struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT]; + struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN]; }; struct mlxsw_sp_sb { - struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT]; + struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN]; struct mlxsw_sp_sb_port *ports; u32 cell_size; + u64 sb_size; }; u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells) @@ -60,95 +84,122 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes) } static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp, - u8 pool, - enum mlxsw_reg_sbxx_dir dir) + u16 pool_index) { - return &mlxsw_sp->sb->prs[dir][pool]; + return &mlxsw_sp->sb->prs[pool_index]; +} + +static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir) +{ + if (dir == MLXSW_REG_SBXX_DIR_INGRESS) + return pg_buff < MLXSW_SP_SB_ING_TC_COUNT; + else + return pg_buff < MLXSW_SP_SB_EG_TC_COUNT; } static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, u8 pg_buff, enum mlxsw_reg_sbxx_dir dir) { - return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff]; + struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port]; + + WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir)); + if (dir == MLXSW_REG_SBXX_DIR_INGRESS) + return &sb_port->ing_cms[pg_buff]; + else + return &sb_port->eg_cms[pg_buff]; } static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 pool, - enum mlxsw_reg_sbxx_dir dir) + u8 local_port, u16 pool_index) { - return &mlxsw_sp->sb->ports[local_port].pms[dir][pool]; + return &mlxsw_sp->sb->ports[local_port].pms[pool_index]; } -static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool, - enum mlxsw_reg_sbxx_dir dir, - enum mlxsw_reg_sbpr_mode mode, u32 size) +static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index, + enum mlxsw_reg_sbpr_mode mode, + u32 size, bool infi_size) { + const struct mlxsw_sp_sb_pool_des *des = + &mlxsw_sp_sb_pool_dess[pool_index]; char sbpr_pl[MLXSW_REG_SBPR_LEN]; struct mlxsw_sp_sb_pr *pr; int err; - mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size); + mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode, + size, infi_size); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); if (err) return err; - pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + if (infi_size) + size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size); + pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pr->mode = mode; pr->size = size; return 0; } static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 pg_buff, enum mlxsw_reg_sbxx_dir dir, - u32 min_buff, u32 max_buff, u8 pool) + u8 pg_buff, u32 min_buff, u32 max_buff, + bool infi_max, u16 pool_index) { + const struct mlxsw_sp_sb_pool_des *des = + 
&mlxsw_sp_sb_pool_dess[pool_index]; char sbcm_pl[MLXSW_REG_SBCM_LEN]; + struct mlxsw_sp_sb_cm *cm; int err; - mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir, - min_buff, max_buff, pool); + mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir, + min_buff, max_buff, infi_max, des->pool); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); if (err) return err; - if (pg_buff < MLXSW_SP_SB_TC_COUNT) { - struct mlxsw_sp_sb_cm *cm; - cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir); + if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) { + if (infi_max) + max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, + mlxsw_sp->sb->sb_size); + + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, + des->dir); cm->min_buff = min_buff; cm->max_buff = max_buff; - cm->pool = pool; + cm->pool_index = pool_index; } return 0; } static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 pool, enum mlxsw_reg_sbxx_dir dir, - u32 min_buff, u32 max_buff) + u16 pool_index, u32 min_buff, u32 max_buff) { + const struct mlxsw_sp_sb_pool_des *des = + &mlxsw_sp_sb_pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; struct mlxsw_sp_sb_pm *pm; int err; - mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false, min_buff, max_buff); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl); if (err) return err; - pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); pm->min_buff = min_buff; pm->max_buff = max_buff; return 0; } static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 pool, enum mlxsw_reg_sbxx_dir dir, - struct list_head *bulk_list) + u16 pool_index, struct list_head *bulk_list) { + const struct mlxsw_sp_sb_pool_des *des = + &mlxsw_sp_sb_pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; - mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0); + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, + true, 0, 0); return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, bulk_list, NULL, 0); } @@ -163,14 +214,16 @@ static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core, } static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 pool, enum mlxsw_reg_sbxx_dir dir, - struct list_head *bulk_list) + u16 pool_index, struct list_head *bulk_list) { + const struct mlxsw_sp_sb_pool_des *des = + &mlxsw_sp_sb_pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; struct mlxsw_sp_sb_pm *pm; - pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); - mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0); + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, + false, 0, 0); return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, bulk_list, mlxsw_sp_sb_pm_occ_query_cb, @@ -254,63 +307,54 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp) .size = _size, \ } -static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = { +static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = { + /* Ingress pools. 
*/ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_INGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_INGRESS_MNG_SIZE), -}; - -#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress) - -static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = { + /* Egress pools. */ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), }; -#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress) +#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs) -static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_reg_sbxx_dir dir, - const struct mlxsw_sp_sb_pr *prs, - size_t prs_len) +static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_sb_pr *prs, + size_t prs_len) { int i; int err; for (i = 0; i < prs_len; i++) { - u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size); - - err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size); + u32 size = prs[i].size; + u32 size_cells; + + if (size == MLXSW_SP_SB_INFI) { + err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode, + 0, true); + } else { + size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size); + err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode, + size_cells, false); + } if (err) return err; } return 0; } -static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp) -{ - int err; - - err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS, - mlxsw_sp_sb_prs_ingress, - MLXSW_SP_SB_PRS_INGRESS_LEN); - if (err) - return err; - return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS, - mlxsw_sp_sb_prs_egress, - MLXSW_SP_SB_PRS_EGRESS_LEN); -} - #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \ { \ .min_buff = _min_buff, \ .max_buff = _max_buff, \ - .pool = _pool, \ + .pool_index = _pool, \ } static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { @@ -329,38 +373,38 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(0, 140000, 15), - MLXSW_SP_SB_CM(1, 0xff, 0), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(1500, 9, 4), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(1, 0xff, 4), }; #define 
MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress) -#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0) +#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4) static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, @@ -390,6 +434,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) +static bool +mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index) +{ + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); + + return pr->mode == MLXSW_REG_SBPR_MODE_STATIC; +} + static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, enum mlxsw_reg_sbxx_dir dir, const struct mlxsw_sp_sb_cm *cms, @@ -401,16 +453,29 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, for (i = 0; i < cms_len; i++) { const struct mlxsw_sp_sb_cm *cm; u32 min_buff; + u32 max_buff; if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS) continue; /* PG number 8 does not exist, skip it */ cm = &cms[i]; - /* All pools are initialized using dynamic thresholds, - * therefore 'max_buff' isn't specified in cells. - */ + if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir)) + continue; + min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff); - err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir, - min_buff, cm->max_buff, cm->pool); + max_buff = cm->max_buff; + if (max_buff == MLXSW_SP_SB_INFI) { + err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, + min_buff, 0, + true, cm->pool_index); + } else { + if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, + cm->pool_index)) + max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, + max_buff); + err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, + min_buff, max_buff, + false, cm->pool_index); + } if (err) return err; } @@ -448,91 +513,74 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) .max_buff = _max_buff, \ } -static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = { +static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { + /* Ingress pools. */ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), -}; - -#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress) - -static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = { + /* Egress pools. 
*/ MLXSW_SP_SB_PM(0, 7), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(10000, 90000), }; -#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress) +#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) -static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, - enum mlxsw_reg_sbxx_dir dir, - const struct mlxsw_sp_sb_pm *pms, - size_t pms_len) +static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int i; int err; - for (i = 0; i < pms_len; i++) { - const struct mlxsw_sp_sb_pm *pm; + for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { + const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i]; + u32 max_buff; + u32 min_buff; - pm = &pms[i]; - err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir, - pm->min_buff, pm->max_buff); + min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff); + max_buff = pm->max_buff; + if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i)) + max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff); + err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port, + i, min_buff, max_buff); if (err) return err; } return 0; } -static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) -{ - int err; - - err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, - mlxsw_sp_port->local_port, - MLXSW_REG_SBXX_DIR_INGRESS, - mlxsw_sp_sb_pms_ingress, - MLXSW_SP_SB_PMS_INGRESS_LEN); - if (err) - return err; - return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, - mlxsw_sp_port->local_port, - MLXSW_REG_SBXX_DIR_EGRESS, - mlxsw_sp_sb_pms_egress, - MLXSW_SP_SB_PMS_EGRESS_LEN); -} - struct mlxsw_sp_sb_mm { u32 min_buff; u32 max_buff; - u8 pool; + u16 pool_index; }; #define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \ { \ .min_buff = _min_buff, \ .max_buff = _max_buff, \ - .pool = _pool, \ + .pool_index = _pool, \ } static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), - MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6, 4), }; #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) @@ -544,16 +592,18 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) int err; for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) { + const struct mlxsw_sp_sb_pool_des *des; const struct mlxsw_sp_sb_mm *mc; u32 min_buff; mc = &mlxsw_sp_sb_mms[i]; - /* All pools are initialized using dynamic thresholds, - * therefore 'max_buff' isn't specified in cells. 
+ des = &mlxsw_sp_sb_pool_dess[mc->pool_index]; + /* All pools used by sb_mm's are initialized using dynamic + * thresholds, therefore 'max_buff' isn't specified in cells. */ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff); mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff, - mc->pool); + des->pool); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl); if (err) return err; @@ -561,9 +611,24 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) return 0; } +static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len) +{ + int i; + + for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; ++i) + if (mlxsw_sp_sb_pool_dess[i].dir == MLXSW_REG_SBXX_DIR_EGRESS) + goto out; + WARN(1, "No egress pools\n"); + +out: + *p_ingress_len = i; + *p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i; +} + int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { - u64 sb_size; + u16 ing_pool_count; + u16 eg_pool_count; int err; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE)) @@ -571,17 +636,19 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE)) return -EIO; - sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE); mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL); if (!mlxsw_sp->sb) return -ENOMEM; mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE); + mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_BUFFER_SIZE); err = mlxsw_sp_sb_ports_init(mlxsw_sp); if (err) goto err_sb_ports_init; - err = mlxsw_sp_sb_prs_init(mlxsw_sp); + err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs, + MLXSW_SP_SB_PRS_LEN); if (err) goto err_sb_prs_init; err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); @@ -590,11 +657,13 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) err = mlxsw_sp_sb_mms_init(mlxsw_sp); if (err) goto err_sb_mms_init; - err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size, - MLXSW_SP_SB_POOL_COUNT, - MLXSW_SP_SB_POOL_COUNT, - MLXSW_SP_SB_TC_COUNT, - MLXSW_SP_SB_TC_COUNT); + mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count); + err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, + mlxsw_sp->sb->sb_size, + ing_pool_count, + eg_pool_count, + MLXSW_SP_SB_ING_TC_COUNT, + MLXSW_SP_SB_EG_TC_COUNT); if (err) goto err_devlink_sb_register; @@ -632,36 +701,15 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) return err; } -static u8 pool_get(u16 pool_index) -{ - return pool_index % MLXSW_SP_SB_POOL_COUNT; -} - -static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir) -{ - u16 pool_index; - - pool_index = pool; - if (dir == MLXSW_REG_SBXX_DIR_EGRESS) - pool_index += MLXSW_SP_SB_POOL_COUNT; - return pool_index; -} - -static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index) -{ - return pool_index < MLXSW_SP_SB_POOL_COUNT ? 
- MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS; -} - int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info) { + enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir; struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); - struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + struct mlxsw_sp_sb_pr *pr; + pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pool_info->pool_type = (enum devlink_sb_pool_type) dir; pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size); pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode; @@ -674,34 +722,32 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size); - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); enum mlxsw_reg_sbpr_mode mode; if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) return -EINVAL; mode = (enum mlxsw_reg_sbpr_mode) threshold_type; - return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size); + return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode, + pool_size, false); } #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */ -static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool, - enum mlxsw_reg_sbxx_dir dir, u32 max_buff) +static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index, + u32 max_buff) { - struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff); } -static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool, - enum mlxsw_reg_sbxx_dir dir, u32 threshold, - u32 *p_max_buff) +static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index, + u32 threshold, u32 *p_max_buff) { - struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) { int val; @@ -725,12 +771,10 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 local_port = mlxsw_sp_port->local_port; - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, - pool, dir); + pool_index); - *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir, + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index, pm->max_buff); return 0; } @@ -743,17 +787,15 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 local_port = mlxsw_sp_port->local_port; - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); u32 max_buff; int err; - err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index, threshold, &max_buff); if (err) return err; - return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir, + return 
mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index, 0, max_buff); } @@ -771,9 +813,9 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir); - *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir, + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index, cm->max_buff); - *p_pool_index = pool_index_get(cm->pool, dir); + *p_pool_index = cm->pool_index; return 0; } @@ -788,24 +830,24 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, u8 local_port = mlxsw_sp_port->local_port; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; - u8 pool = pool_get(pool_index); u32 max_buff; int err; - if (dir != dir_get(pool_index)) + if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir) return -EINVAL; - err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index, threshold, &max_buff); if (err) return err; - return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, - 0, max_buff, pool); + return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, + 0, max_buff, false, pool_index); } #define MASKED_COUNT_MAX \ - (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2)) + (MLXSW_REG_SBSR_REC_MAX_COUNT / \ + (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT)) struct mlxsw_sp_sb_sr_occ_query_cb_ctx { u8 masked_count; @@ -831,7 +873,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; - for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) { cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, MLXSW_REG_SBXX_DIR_INGRESS); mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, @@ -845,7 +887,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; - for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) { cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, MLXSW_REG_SBXX_DIR_EGRESS); mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, @@ -880,23 +922,17 @@ next_batch: local_port_1 = local_port; masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, false); - for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); + for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); - } for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); - for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { - err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, - MLXSW_REG_SBXX_DIR_INGRESS, - &bulk_list); - if (err) - goto out; + for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) { err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, - MLXSW_REG_SBXX_DIR_EGRESS, &bulk_list); if (err) goto out; @@ -945,23 +981,17 @@ next_batch: local_port++; masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, true); - for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); + for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) 
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); - } for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); - for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { - err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, - MLXSW_REG_SBXX_DIR_INGRESS, - &bulk_list); - if (err) - goto out; + for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) { err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, - MLXSW_REG_SBXX_DIR_EGRESS, &bulk_list); if (err) goto out; @@ -994,10 +1024,8 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 local_port = mlxsw_sp_port->local_port; - u8 pool = pool_get(pool_index); - enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, - pool, dir); + pool_index); *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur); *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 715d24ff937e..a3db033d7399 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -6,6 +6,7 @@ #include <linux/if_vlan.h> #include <linux/if_bridge.h> #include <linux/netdevice.h> +#include <linux/rhashtable.h> #include <linux/rtnetlink.h> #include "spectrum.h" @@ -14,6 +15,7 @@ struct mlxsw_sp_fid_family; struct mlxsw_sp_fid_core { + struct rhashtable vni_ht; struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX]; unsigned int *port_fid_mappings; }; @@ -24,6 +26,12 @@ struct mlxsw_sp_fid { unsigned int ref_count; u16 fid_index; struct mlxsw_sp_fid_family *fid_family; + + struct rhash_head vni_ht_node; + __be32 vni; + u32 nve_flood_index; + u8 vni_valid:1, + nve_flood_index_valid:1; }; struct mlxsw_sp_fid_8021q { @@ -36,6 +44,12 @@ struct mlxsw_sp_fid_8021d { int br_ifindex; }; +static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = { + .key_len = sizeof_field(struct mlxsw_sp_fid, vni), + .key_offset = offsetof(struct mlxsw_sp_fid, vni), + .head_offset = offsetof(struct mlxsw_sp_fid, vni_ht_node), +}; + struct mlxsw_sp_flood_table { enum mlxsw_sp_flood_type packet_type; enum mlxsw_reg_sfgc_bridge_type bridge_type; @@ -56,6 +70,11 @@ struct mlxsw_sp_fid_ops { struct mlxsw_sp_port *port, u16 vid); void (*port_vid_unmap)(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *port, u16 vid); + int (*vni_set)(struct mlxsw_sp_fid *fid, __be32 vni); + void (*vni_clear)(struct mlxsw_sp_fid *fid); + int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid, + u32 nve_flood_index); + void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid); }; struct mlxsw_sp_fid_family { @@ -94,6 +113,117 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = { [MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types, }; +struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp, + __be32 vni) +{ + struct mlxsw_sp_fid *fid; + + fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->vni_ht, &vni, + mlxsw_sp_fid_vni_ht_params); + if (fid) + fid->ref_count++; + + return fid; +} + +int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni) +{ + if (!fid->vni_valid) + return -EINVAL; + + *vni = fid->vni; + + return 0; +} + +int 
mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid, + u32 nve_flood_index) +{ + struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + const struct mlxsw_sp_fid_ops *ops = fid_family->ops; + int err; + + if (WARN_ON(!ops->nve_flood_index_set || fid->nve_flood_index_valid)) + return -EINVAL; + + err = ops->nve_flood_index_set(fid, nve_flood_index); + if (err) + return err; + + fid->nve_flood_index = nve_flood_index; + fid->nve_flood_index_valid = true; + + return 0; +} + +void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid) +{ + struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + const struct mlxsw_sp_fid_ops *ops = fid_family->ops; + + if (WARN_ON(!ops->nve_flood_index_clear || !fid->nve_flood_index_valid)) + return; + + fid->nve_flood_index_valid = false; + ops->nve_flood_index_clear(fid); +} + +bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid) +{ + return fid->nve_flood_index_valid; +} + +int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni) +{ + struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + const struct mlxsw_sp_fid_ops *ops = fid_family->ops; + struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; + int err; + + if (WARN_ON(!ops->vni_set || fid->vni_valid)) + return -EINVAL; + + fid->vni = vni; + err = rhashtable_lookup_insert_fast(&mlxsw_sp->fid_core->vni_ht, + &fid->vni_ht_node, + mlxsw_sp_fid_vni_ht_params); + if (err) + return err; + + err = ops->vni_set(fid, vni); + if (err) + goto err_vni_set; + + fid->vni_valid = true; + + return 0; + +err_vni_set: + rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node, + mlxsw_sp_fid_vni_ht_params); + return err; +} + +void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid) +{ + struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + const struct mlxsw_sp_fid_ops *ops = fid_family->ops; + struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; + + if (WARN_ON(!ops->vni_clear || !fid->vni_valid)) + return; + + fid->vni_valid = false; + ops->vni_clear(fid); + rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node, + mlxsw_sp_fid_vni_ht_params); +} + +bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid) +{ + return fid->vni_valid; +} + static const struct mlxsw_sp_flood_table * mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid, enum mlxsw_sp_flood_type packet_type) @@ -217,6 +347,21 @@ static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); } +static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index, + __be32 vni, bool vni_valid, u32 nve_flood_index, + bool nve_flood_index_valid) +{ + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index, + 0); + mlxsw_reg_sfmr_vv_set(sfmr_pl, vni_valid); + mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(vni)); + mlxsw_reg_sfmr_vtfp_set(sfmr_pl, nve_flood_index_valid); + mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, nve_flood_index); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + static int mlxsw_sp_fid_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index, u16 vid, bool valid) { @@ -393,6 +538,8 @@ static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid) static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid) { + if (fid->vni_valid) + mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid); mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false); } @@ -531,6 +678,41 @@ 
mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid, mlxsw_sp_port->local_port, vid, false); } +static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid, __be32 vni) +{ + struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + + return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, vni, + true, fid->nve_flood_index, + fid->nve_flood_index_valid); +} + +static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid) +{ + struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + + mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, 0, false, + fid->nve_flood_index, fid->nve_flood_index_valid); +} + +static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid, + u32 nve_flood_index) +{ + struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + + return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, + fid->vni, fid->vni_valid, nve_flood_index, + true); +} + +static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid) +{ + struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + + mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, fid->vni, + fid->vni_valid, 0, false); +} + static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = { .setup = mlxsw_sp_fid_8021d_setup, .configure = mlxsw_sp_fid_8021d_configure, @@ -540,6 +722,10 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = { .flood_index = mlxsw_sp_fid_8021d_flood_index, .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map, .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap, + .vni_set = mlxsw_sp_fid_8021d_vni_set, + .vni_clear = mlxsw_sp_fid_8021d_vni_clear, + .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set, + .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear, }; static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = { @@ -708,14 +894,12 @@ static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = { [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family, }; -static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_sp_fid_type type, - const void *arg) +static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_fid_type type, + const void *arg) { struct mlxsw_sp_fid_family *fid_family; struct mlxsw_sp_fid *fid; - u16 fid_index; - int err; fid_family = mlxsw_sp->fid_core->fid_family_arr[type]; list_for_each_entry(fid, &fid_family->fids_list, list) { @@ -725,6 +909,23 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp, return fid; } + return NULL; +} + +static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_fid_type type, + const void *arg) +{ + struct mlxsw_sp_fid_family *fid_family; + struct mlxsw_sp_fid *fid; + u16 fid_index; + int err; + + fid = mlxsw_sp_fid_lookup(mlxsw_sp, type, arg); + if (fid) + return fid; + + fid_family = mlxsw_sp->fid_core->fid_family_arr[type]; fid = kzalloc(fid_family->fid_size, GFP_KERNEL); if (!fid) return ERR_PTR(-ENOMEM); @@ -784,6 +985,13 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex); } +struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp, + int br_ifindex) +{ + return mlxsw_sp_fid_lookup(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, + &br_ifindex); +} + struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp, u16 rif_index) { @@ -918,6 +1126,10 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp) return 
-ENOMEM; mlxsw_sp->fid_core = fid_core; + err = rhashtable_init(&fid_core->vni_ht, &mlxsw_sp_fid_vni_ht_params); + if (err) + goto err_rhashtable_init; + fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int), GFP_KERNEL); if (!fid_core->port_fid_mappings) { @@ -944,6 +1156,8 @@ err_fid_ops_register: } kfree(fid_core->port_fid_mappings); err_alloc_port_fid_mappings: + rhashtable_destroy(&fid_core->vni_ht); +err_rhashtable_init: kfree(fid_core); return err; } @@ -957,5 +1171,6 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_fid_family_unregister(mlxsw_sp, fid_core->fid_family_arr[i]); kfree(fid_core->port_fid_mappings); + rhashtable_destroy(&fid_core->vni_ht); kfree(fid_core); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c new file mode 100644 index 000000000000..ad06d9969bc1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -0,0 +1,982 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ + +#include <linux/err.h> +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/netlink.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include <net/inet_ecn.h> +#include <net/ipv6.h> + +#include "reg.h" +#include "spectrum.h" +#include "spectrum_nve.h" + +const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[] = { + [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp1_nve_vxlan_ops, +}; + +const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[] = { + [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp2_nve_vxlan_ops, +}; + +struct mlxsw_sp_nve_mc_entry; +struct mlxsw_sp_nve_mc_record; +struct mlxsw_sp_nve_mc_list; + +struct mlxsw_sp_nve_mc_record_ops { + enum mlxsw_reg_tnumt_record_type type; + int (*entry_add)(struct mlxsw_sp_nve_mc_record *mc_record, + struct mlxsw_sp_nve_mc_entry *mc_entry, + const union mlxsw_sp_l3addr *addr); + void (*entry_del)(const struct mlxsw_sp_nve_mc_record *mc_record, + const struct mlxsw_sp_nve_mc_entry *mc_entry); + void (*entry_set)(const struct mlxsw_sp_nve_mc_record *mc_record, + const struct mlxsw_sp_nve_mc_entry *mc_entry, + char *tnumt_pl, unsigned int entry_index); + bool (*entry_compare)(const struct mlxsw_sp_nve_mc_record *mc_record, + const struct mlxsw_sp_nve_mc_entry *mc_entry, + const union mlxsw_sp_l3addr *addr); +}; + +struct mlxsw_sp_nve_mc_list_key { + u16 fid_index; +}; + +struct mlxsw_sp_nve_mc_ipv6_entry { + struct in6_addr addr6; + u32 addr6_kvdl_index; +}; + +struct mlxsw_sp_nve_mc_entry { + union { + __be32 addr4; + struct mlxsw_sp_nve_mc_ipv6_entry ipv6_entry; + }; + u8 valid:1; +}; + +struct mlxsw_sp_nve_mc_record { + struct list_head list; + enum mlxsw_sp_l3proto proto; + unsigned int num_entries; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_nve_mc_list *mc_list; + const struct mlxsw_sp_nve_mc_record_ops *ops; + u32 kvdl_index; + struct mlxsw_sp_nve_mc_entry entries[0]; +}; + +struct mlxsw_sp_nve_mc_list { + struct list_head records_list; + struct rhash_head ht_node; + struct mlxsw_sp_nve_mc_list_key key; +}; + +static const struct rhashtable_params mlxsw_sp_nve_mc_list_ht_params = { + .key_len = sizeof(struct mlxsw_sp_nve_mc_list_key), + .key_offset = offsetof(struct mlxsw_sp_nve_mc_list, key), + .head_offset = offsetof(struct mlxsw_sp_nve_mc_list, ht_node), +}; + +static int +mlxsw_sp_nve_mc_record_ipv4_entry_add(struct mlxsw_sp_nve_mc_record *mc_record, + struct mlxsw_sp_nve_mc_entry *mc_entry, + const union mlxsw_sp_l3addr 
*addr) +{ + mc_entry->addr4 = addr->addr4; + + return 0; +} + +static void +mlxsw_sp_nve_mc_record_ipv4_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record, + const struct mlxsw_sp_nve_mc_entry *mc_entry) +{ +} + +static void +mlxsw_sp_nve_mc_record_ipv4_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record, + const struct mlxsw_sp_nve_mc_entry *mc_entry, + char *tnumt_pl, unsigned int entry_index) +{ + u32 udip = be32_to_cpu(mc_entry->addr4); + + mlxsw_reg_tnumt_udip_set(tnumt_pl, entry_index, udip); +} + +static bool +mlxsw_sp_nve_mc_record_ipv4_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record, + const struct mlxsw_sp_nve_mc_entry *mc_entry, + const union mlxsw_sp_l3addr *addr) +{ + return mc_entry->addr4 == addr->addr4; +} + +static const struct mlxsw_sp_nve_mc_record_ops +mlxsw_sp_nve_mc_record_ipv4_ops = { + .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV4, + .entry_add = &mlxsw_sp_nve_mc_record_ipv4_entry_add, + .entry_del = &mlxsw_sp_nve_mc_record_ipv4_entry_del, + .entry_set = &mlxsw_sp_nve_mc_record_ipv4_entry_set, + .entry_compare = &mlxsw_sp_nve_mc_record_ipv4_entry_compare, +}; + +static int +mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record, + struct mlxsw_sp_nve_mc_entry *mc_entry, + const union mlxsw_sp_l3addr *addr) +{ + WARN_ON(1); + + return -EINVAL; +} + +static void +mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record, + const struct mlxsw_sp_nve_mc_entry *mc_entry) +{ +} + +static void +mlxsw_sp_nve_mc_record_ipv6_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record, + const struct mlxsw_sp_nve_mc_entry *mc_entry, + char *tnumt_pl, unsigned int entry_index) +{ + u32 udip_ptr = mc_entry->ipv6_entry.addr6_kvdl_index; + + mlxsw_reg_tnumt_udip_ptr_set(tnumt_pl, entry_index, udip_ptr); +} + +static bool +mlxsw_sp_nve_mc_record_ipv6_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record, + const struct mlxsw_sp_nve_mc_entry *mc_entry, + const union mlxsw_sp_l3addr *addr) +{ + return ipv6_addr_equal(&mc_entry->ipv6_entry.addr6, &addr->addr6); +} + +static const struct mlxsw_sp_nve_mc_record_ops +mlxsw_sp_nve_mc_record_ipv6_ops = { + .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV6, + .entry_add = &mlxsw_sp_nve_mc_record_ipv6_entry_add, + .entry_del = &mlxsw_sp_nve_mc_record_ipv6_entry_del, + .entry_set = &mlxsw_sp_nve_mc_record_ipv6_entry_set, + .entry_compare = &mlxsw_sp_nve_mc_record_ipv6_entry_compare, +}; + +static const struct mlxsw_sp_nve_mc_record_ops * +mlxsw_sp_nve_mc_record_ops_arr[] = { + [MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_nve_mc_record_ipv4_ops, + [MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops, +}; + +static struct mlxsw_sp_nve_mc_list * +mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nve_mc_list_key *key) +{ + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + + return rhashtable_lookup_fast(&nve->mc_list_ht, key, + mlxsw_sp_nve_mc_list_ht_params); +} + +static struct mlxsw_sp_nve_mc_list * +mlxsw_sp_nve_mc_list_create(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nve_mc_list_key *key) +{ + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + struct mlxsw_sp_nve_mc_list *mc_list; + int err; + + mc_list = kmalloc(sizeof(*mc_list), GFP_KERNEL); + if (!mc_list) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&mc_list->records_list); + mc_list->key = *key; + + err = rhashtable_insert_fast(&nve->mc_list_ht, &mc_list->ht_node, + mlxsw_sp_nve_mc_list_ht_params); + if (err) + goto err_rhashtable_insert; + + return mc_list; + +err_rhashtable_insert: + 
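/* Unwind for a failed rhashtable insertion: the MC list was never + * published for lookups and holds no records yet, so freeing the + * allocation is all that is needed. */ +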
kfree(mc_list); + return ERR_PTR(err); +} + +static void mlxsw_sp_nve_mc_list_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nve_mc_list *mc_list) +{ + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + + rhashtable_remove_fast(&nve->mc_list_ht, &mc_list->ht_node, + mlxsw_sp_nve_mc_list_ht_params); + WARN_ON(!list_empty(&mc_list->records_list)); + kfree(mc_list); +} + +static struct mlxsw_sp_nve_mc_list * +mlxsw_sp_nve_mc_list_get(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nve_mc_list_key *key) +{ + struct mlxsw_sp_nve_mc_list *mc_list; + + mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, key); + if (mc_list) + return mc_list; + + return mlxsw_sp_nve_mc_list_create(mlxsw_sp, key); +} + +static void +mlxsw_sp_nve_mc_list_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nve_mc_list *mc_list) +{ + if (!list_empty(&mc_list->records_list)) + return; + mlxsw_sp_nve_mc_list_destroy(mlxsw_sp, mc_list); +} + +static struct mlxsw_sp_nve_mc_record * +mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nve_mc_list *mc_list, + enum mlxsw_sp_l3proto proto) +{ + unsigned int num_max_entries = mlxsw_sp->nve->num_max_mc_entries[proto]; + struct mlxsw_sp_nve_mc_record *mc_record; + int err; + + mc_record = kzalloc(sizeof(*mc_record) + num_max_entries * + sizeof(struct mlxsw_sp_nve_mc_entry), GFP_KERNEL); + if (!mc_record) + return ERR_PTR(-ENOMEM); + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1, + &mc_record->kvdl_index); + if (err) + goto err_kvdl_alloc; + + mc_record->ops = mlxsw_sp_nve_mc_record_ops_arr[proto]; + mc_record->mlxsw_sp = mlxsw_sp; + mc_record->mc_list = mc_list; + mc_record->proto = proto; + list_add_tail(&mc_record->list, &mc_list->records_list); + + return mc_record; + +err_kvdl_alloc: + kfree(mc_record); + return ERR_PTR(err); +} + +static void +mlxsw_sp_nve_mc_record_destroy(struct mlxsw_sp_nve_mc_record *mc_record) +{ + struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp; + + list_del(&mc_record->list); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1, + mc_record->kvdl_index); + WARN_ON(mc_record->num_entries); + kfree(mc_record); +} + +static struct mlxsw_sp_nve_mc_record * +mlxsw_sp_nve_mc_record_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nve_mc_list *mc_list, + enum mlxsw_sp_l3proto proto) +{ + struct mlxsw_sp_nve_mc_record *mc_record; + + list_for_each_entry_reverse(mc_record, &mc_list->records_list, list) { + unsigned int num_entries = mc_record->num_entries; + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + + if (mc_record->proto == proto && + num_entries < nve->num_max_mc_entries[proto]) + return mc_record; + } + + return mlxsw_sp_nve_mc_record_create(mlxsw_sp, mc_list, proto); +} + +static void +mlxsw_sp_nve_mc_record_put(struct mlxsw_sp_nve_mc_record *mc_record) +{ + if (mc_record->num_entries != 0) + return; + + mlxsw_sp_nve_mc_record_destroy(mc_record); +} + +static struct mlxsw_sp_nve_mc_entry * +mlxsw_sp_nve_mc_free_entry_find(struct mlxsw_sp_nve_mc_record *mc_record) +{ + struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve; + unsigned int num_max_entries; + int i; + + num_max_entries = nve->num_max_mc_entries[mc_record->proto]; + for (i = 0; i < num_max_entries; i++) { + if (mc_record->entries[i].valid) + continue; + return &mc_record->entries[i]; + } + + return NULL; +} + +static int +mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record) +{ + enum mlxsw_reg_tnumt_record_type type = mc_record->ops->type; + struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list; + struct mlxsw_sp 
*mlxsw_sp = mc_record->mlxsw_sp; + char tnumt_pl[MLXSW_REG_TNUMT_LEN]; + unsigned int num_max_entries; + unsigned int num_entries = 0; + u32 next_kvdl_index = 0; + bool next_valid = false; + int i; + + if (!list_is_last(&mc_record->list, &mc_list->records_list)) { + struct mlxsw_sp_nve_mc_record *next_record; + + next_record = list_next_entry(mc_record, list); + next_kvdl_index = next_record->kvdl_index; + next_valid = true; + } + + mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, + mc_record->kvdl_index, next_valid, + next_kvdl_index, mc_record->num_entries); + + num_max_entries = mlxsw_sp->nve->num_max_mc_entries[mc_record->proto]; + for (i = 0; i < num_max_entries; i++) { + struct mlxsw_sp_nve_mc_entry *mc_entry; + + mc_entry = &mc_record->entries[i]; + if (!mc_entry->valid) + continue; + mc_record->ops->entry_set(mc_record, mc_entry, tnumt_pl, + num_entries++); + } + + WARN_ON(num_entries != mc_record->num_entries); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnumt), tnumt_pl); +} + +static bool +mlxsw_sp_nve_mc_record_is_first(struct mlxsw_sp_nve_mc_record *mc_record) +{ + struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list; + struct mlxsw_sp_nve_mc_record *first_record; + + first_record = list_first_entry(&mc_list->records_list, + struct mlxsw_sp_nve_mc_record, list); + + return mc_record == first_record; +} + +static struct mlxsw_sp_nve_mc_entry * +mlxsw_sp_nve_mc_entry_find(struct mlxsw_sp_nve_mc_record *mc_record, + union mlxsw_sp_l3addr *addr) +{ + struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve; + unsigned int num_max_entries; + int i; + + num_max_entries = nve->num_max_mc_entries[mc_record->proto]; + for (i = 0; i < num_max_entries; i++) { + struct mlxsw_sp_nve_mc_entry *mc_entry; + + mc_entry = &mc_record->entries[i]; + if (!mc_entry->valid) + continue; + if (mc_record->ops->entry_compare(mc_record, mc_entry, addr)) + return mc_entry; + } + + return NULL; +} + +static int +mlxsw_sp_nve_mc_record_ip_add(struct mlxsw_sp_nve_mc_record *mc_record, + union mlxsw_sp_l3addr *addr) +{ + struct mlxsw_sp_nve_mc_entry *mc_entry = NULL; + int err; + + mc_entry = mlxsw_sp_nve_mc_free_entry_find(mc_record); + if (WARN_ON(!mc_entry)) + return -EINVAL; + + err = mc_record->ops->entry_add(mc_record, mc_entry, addr); + if (err) + return err; + mc_record->num_entries++; + mc_entry->valid = true; + + err = mlxsw_sp_nve_mc_record_refresh(mc_record); + if (err) + goto err_record_refresh; + + /* If this is a new record and not the first one, then we need to + * update the next pointer of the previous entry + */ + if (mc_record->num_entries != 1 || + mlxsw_sp_nve_mc_record_is_first(mc_record)) + return 0; + + err = mlxsw_sp_nve_mc_record_refresh(list_prev_entry(mc_record, list)); + if (err) + goto err_prev_record_refresh; + + return 0; + +err_prev_record_refresh: +err_record_refresh: + mc_entry->valid = false; + mc_record->num_entries--; + mc_record->ops->entry_del(mc_record, mc_entry); + return err; +} + +static void +mlxsw_sp_nve_mc_record_entry_del(struct mlxsw_sp_nve_mc_record *mc_record, + struct mlxsw_sp_nve_mc_entry *mc_entry) +{ + struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list; + + mc_entry->valid = false; + mc_record->num_entries--; + + /* When the record continues to exist we only need to invalidate + * the requested entry + */ + if (mc_record->num_entries != 0) { + mlxsw_sp_nve_mc_record_refresh(mc_record); + mc_record->ops->entry_del(mc_record, mc_entry); + return; + } + + /* If the record needs to be deleted, but it is not the first, + * 
then we need to make sure that the previous record no longer + * points to it. Remove deleted record from the list to reflect + * that and then re-add it at the end, so that it could be + * properly removed by the record destruction code + */ + if (!mlxsw_sp_nve_mc_record_is_first(mc_record)) { + struct mlxsw_sp_nve_mc_record *prev_record; + + prev_record = list_prev_entry(mc_record, list); + list_del(&mc_record->list); + mlxsw_sp_nve_mc_record_refresh(prev_record); + list_add_tail(&mc_record->list, &mc_list->records_list); + mc_record->ops->entry_del(mc_record, mc_entry); + return; + } + + /* If the first record needs to be deleted, but the list is not + * singular, then the second record needs to be written in the + * first record's address, as this address is stored as a property + * of the FID + */ + if (mlxsw_sp_nve_mc_record_is_first(mc_record) && + !list_is_singular(&mc_list->records_list)) { + struct mlxsw_sp_nve_mc_record *next_record; + + next_record = list_next_entry(mc_record, list); + swap(mc_record->kvdl_index, next_record->kvdl_index); + mlxsw_sp_nve_mc_record_refresh(next_record); + mc_record->ops->entry_del(mc_record, mc_entry); + return; + } + + /* This is the last case where the last remaining record needs to + * be deleted. Simply delete the entry + */ + mc_record->ops->entry_del(mc_record, mc_entry); +} + +static struct mlxsw_sp_nve_mc_record * +mlxsw_sp_nve_mc_record_find(struct mlxsw_sp_nve_mc_list *mc_list, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr, + struct mlxsw_sp_nve_mc_entry **mc_entry) +{ + struct mlxsw_sp_nve_mc_record *mc_record; + + list_for_each_entry(mc_record, &mc_list->records_list, list) { + if (mc_record->proto != proto) + continue; + + *mc_entry = mlxsw_sp_nve_mc_entry_find(mc_record, addr); + if (*mc_entry) + return mc_record; + } + + return NULL; +} + +static int mlxsw_sp_nve_mc_list_ip_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nve_mc_list *mc_list, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr) +{ + struct mlxsw_sp_nve_mc_record *mc_record; + int err; + + mc_record = mlxsw_sp_nve_mc_record_get(mlxsw_sp, mc_list, proto); + if (IS_ERR(mc_record)) + return PTR_ERR(mc_record); + + err = mlxsw_sp_nve_mc_record_ip_add(mc_record, addr); + if (err) + goto err_ip_add; + + return 0; + +err_ip_add: + mlxsw_sp_nve_mc_record_put(mc_record); + return err; +} + +static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nve_mc_list *mc_list, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr) +{ + struct mlxsw_sp_nve_mc_record *mc_record; + struct mlxsw_sp_nve_mc_entry *mc_entry; + + mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr, + &mc_entry); + if (WARN_ON(!mc_record)) + return; + + mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry); + mlxsw_sp_nve_mc_record_put(mc_record); +} + +static int +mlxsw_sp_nve_fid_flood_index_set(struct mlxsw_sp_fid *fid, + struct mlxsw_sp_nve_mc_list *mc_list) +{ + struct mlxsw_sp_nve_mc_record *mc_record; + + /* The address of the first record in the list is a property of + * the FID and we never change it. 
It only needs to be set when + * a new list is created + */ + if (mlxsw_sp_fid_nve_flood_index_is_set(fid)) + return 0; + + mc_record = list_first_entry(&mc_list->records_list, + struct mlxsw_sp_nve_mc_record, list); + + return mlxsw_sp_fid_nve_flood_index_set(fid, mc_record->kvdl_index); +} + +static void +mlxsw_sp_nve_fid_flood_index_clear(struct mlxsw_sp_fid *fid, + struct mlxsw_sp_nve_mc_list *mc_list) +{ + struct mlxsw_sp_nve_mc_record *mc_record; + + /* The address of the first record needs to be invalidated only when + * the last record is about to be removed + */ + if (!list_is_singular(&mc_list->records_list)) + return; + + mc_record = list_first_entry(&mc_list->records_list, + struct mlxsw_sp_nve_mc_record, list); + if (mc_record->num_entries != 1) + return; + + return mlxsw_sp_fid_nve_flood_index_clear(fid); +} + +int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr) +{ + struct mlxsw_sp_nve_mc_list_key key = { 0 }; + struct mlxsw_sp_nve_mc_list *mc_list; + int err; + + key.fid_index = mlxsw_sp_fid_index(fid); + mc_list = mlxsw_sp_nve_mc_list_get(mlxsw_sp, &key); + if (IS_ERR(mc_list)) + return PTR_ERR(mc_list); + + err = mlxsw_sp_nve_mc_list_ip_add(mlxsw_sp, mc_list, proto, addr); + if (err) + goto err_add_ip; + + err = mlxsw_sp_nve_fid_flood_index_set(fid, mc_list); + if (err) + goto err_fid_flood_index_set; + + return 0; + +err_fid_flood_index_set: + mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr); +err_add_ip: + mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list); + return err; +} + +void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr) +{ + struct mlxsw_sp_nve_mc_list_key key = { 0 }; + struct mlxsw_sp_nve_mc_list *mc_list; + + key.fid_index = mlxsw_sp_fid_index(fid); + mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key); + if (WARN_ON(!mc_list)) + return; + + mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list); + mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr); + mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list); +} + +static void +mlxsw_sp_nve_mc_record_delete(struct mlxsw_sp_nve_mc_record *mc_record) +{ + struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve; + unsigned int num_max_entries; + int i; + + num_max_entries = nve->num_max_mc_entries[mc_record->proto]; + for (i = 0; i < num_max_entries; i++) { + struct mlxsw_sp_nve_mc_entry *mc_entry = &mc_record->entries[i]; + + if (!mc_entry->valid) + continue; + mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry); + } + + WARN_ON(mc_record->num_entries); + mlxsw_sp_nve_mc_record_put(mc_record); +} + +static void mlxsw_sp_nve_flood_ip_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid) +{ + struct mlxsw_sp_nve_mc_record *mc_record, *tmp; + struct mlxsw_sp_nve_mc_list_key key = { 0 }; + struct mlxsw_sp_nve_mc_list *mc_list; + + if (!mlxsw_sp_fid_nve_flood_index_is_set(fid)) + return; + + mlxsw_sp_fid_nve_flood_index_clear(fid); + + key.fid_index = mlxsw_sp_fid_index(fid); + mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key); + if (WARN_ON(!mc_list)) + return; + + list_for_each_entry_safe(mc_record, tmp, &mc_list->records_list, list) + mlxsw_sp_nve_mc_record_delete(mc_record); + + WARN_ON(!list_empty(&mc_list->records_list)); + mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list); +} + +u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp) +{ + WARN_ON(mlxsw_sp->nve->num_nve_tunnels == 0); + + return 
mlxsw_sp->nve->tunnel_index; +} + +bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp, + u32 tb_id, __be32 addr) +{ + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + struct mlxsw_sp_nve_config *config = &nve->config; + + if (nve->num_nve_tunnels && + config->ul_proto == MLXSW_SP_L3_PROTO_IPV4 && + config->ul_sip.addr4 == addr && config->ul_tb_id == tb_id) + return true; + + return false; +} + +static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nve_config *config) +{ + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + const struct mlxsw_sp_nve_ops *ops; + int err; + + if (nve->num_nve_tunnels++ != 0) + return 0; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + &nve->tunnel_index); + if (err) + goto err_kvdl_alloc; + + ops = nve->nve_ops_arr[config->type]; + err = ops->init(nve, config); + if (err) + goto err_ops_init; + + return 0; + +err_ops_init: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + nve->tunnel_index); +err_kvdl_alloc: + nve->num_nve_tunnels--; + return err; +} + +static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + const struct mlxsw_sp_nve_ops *ops; + + ops = nve->nve_ops_arr[nve->config.type]; + + if (mlxsw_sp->nve->num_nve_tunnels == 1) { + ops->fini(nve); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + nve->tunnel_index); + } + nve->num_nve_tunnels--; +} + +static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp, + u16 fid_index) +{ + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, + struct mlxsw_sp_nve_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + const struct mlxsw_sp_nve_ops *ops; + struct mlxsw_sp_nve_config config; + int err; + + ops = nve->nve_ops_arr[params->type]; + + if (!ops->can_offload(nve, params->dev, extack)) + return -EOPNOTSUPP; + + memset(&config, 0, sizeof(config)); + ops->nve_config(nve, params->dev, &config); + if (nve->num_nve_tunnels && + memcmp(&config, &nve->config, sizeof(config))) { + NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); + return -EOPNOTSUPP; + } + + err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to initialize NVE tunnel"); + return err; + } + + err = mlxsw_sp_fid_vni_set(fid, params->vni); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID"); + goto err_fid_vni_set; + } + + nve->config = config; + + return 0; + +err_fid_vni_set: + mlxsw_sp_nve_tunnel_fini(mlxsw_sp); + return err; +} + +void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid) +{ + u16 fid_index = mlxsw_sp_fid_index(fid); + + mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid); + mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index); + mlxsw_sp_fid_vni_clear(fid); + mlxsw_sp_nve_tunnel_fini(mlxsw_sp); +} + +int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char tnqdr_pl[MLXSW_REG_TNQDR_LEN]; + + mlxsw_reg_tnqdr_pack(tnqdr_pl, mlxsw_sp_port->local_port); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqdr), tnqdr_pl); +} + +void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ +} + +static int 
mlxsw_sp_nve_qos_init(struct mlxsw_sp *mlxsw_sp) +{ + char tnqcr_pl[MLXSW_REG_TNQCR_LEN]; + + mlxsw_reg_tnqcr_pack(tnqcr_pl); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqcr), tnqcr_pl); +} + +static int mlxsw_sp_nve_ecn_encap_init(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + /* Iterate over inner ECN values */ + for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) { + u8 outer_ecn = INET_ECN_encapsulate(0, i); + char tneem_pl[MLXSW_REG_TNEEM_LEN]; + int err; + + mlxsw_reg_tneem_pack(tneem_pl, i, outer_ecn); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tneem), + tneem_pl); + if (err) + return err; + } + + return 0; +} + +static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp, + u8 inner_ecn, u8 outer_ecn) +{ + char tndem_pl[MLXSW_REG_TNDEM_LEN]; + bool trap_en, set_ce = false; + u8 new_inner_ecn; + + trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce); + new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn; + + mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn, + trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl); +} + +static int mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + /* Iterate over inner ECN values */ + for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) { + int j; + + /* Iterate over outer ECN values */ + for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) { + int err; + + err = __mlxsw_sp_nve_ecn_decap_init(mlxsw_sp, i, j); + if (err) + return err; + } + } + + return 0; +} + +static int mlxsw_sp_nve_ecn_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + err = mlxsw_sp_nve_ecn_encap_init(mlxsw_sp); + if (err) + return err; + + return mlxsw_sp_nve_ecn_decap_init(mlxsw_sp); +} + +static int mlxsw_sp_nve_resources_query(struct mlxsw_sp *mlxsw_sp) +{ + unsigned int max; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6)) + return -EIO; + max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4); + mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV4] = max; + max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6); + mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV6] = max; + + return 0; +} + +int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_nve *nve; + int err; + + nve = kzalloc(sizeof(*mlxsw_sp->nve), GFP_KERNEL); + if (!nve) + return -ENOMEM; + mlxsw_sp->nve = nve; + nve->mlxsw_sp = mlxsw_sp; + nve->nve_ops_arr = mlxsw_sp->nve_ops_arr; + + err = rhashtable_init(&nve->mc_list_ht, + &mlxsw_sp_nve_mc_list_ht_params); + if (err) + goto err_rhashtable_init; + + err = mlxsw_sp_nve_qos_init(mlxsw_sp); + if (err) + goto err_nve_qos_init; + + err = mlxsw_sp_nve_ecn_init(mlxsw_sp); + if (err) + goto err_nve_ecn_init; + + err = mlxsw_sp_nve_resources_query(mlxsw_sp); + if (err) + goto err_nve_resources_query; + + return 0; + +err_nve_resources_query: +err_nve_ecn_init: +err_nve_qos_init: + rhashtable_destroy(&nve->mc_list_ht); +err_rhashtable_init: + mlxsw_sp->nve = NULL; + kfree(nve); + return err; +} + +void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp) +{ + WARN_ON(mlxsw_sp->nve->num_nve_tunnels); + rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht); + kfree(mlxsw_sp->nve); + mlxsw_sp->nve = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h new file mode 100644 index 000000000000..4cc3297e13d6 --- /dev/null +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ + +#ifndef _MLXSW_SPECTRUM_NVE_H +#define _MLXSW_SPECTRUM_NVE_H + +#include <linux/netlink.h> +#include <linux/rhashtable.h> + +#include "spectrum.h" + +struct mlxsw_sp_nve_config { + enum mlxsw_sp_nve_type type; + u8 ttl; + u8 learning_en:1; + __be16 udp_dport; + __be32 flowlabel; + u32 ul_tb_id; + enum mlxsw_sp_l3proto ul_proto; + union mlxsw_sp_l3addr ul_sip; +}; + +struct mlxsw_sp_nve { + struct mlxsw_sp_nve_config config; + struct rhashtable mc_list_ht; + struct mlxsw_sp *mlxsw_sp; + const struct mlxsw_sp_nve_ops **nve_ops_arr; + unsigned int num_nve_tunnels; /* Protected by RTNL */ + unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX]; + u32 tunnel_index; +}; + +struct mlxsw_sp_nve_ops { + enum mlxsw_sp_nve_type type; + bool (*can_offload)(const struct mlxsw_sp_nve *nve, + const struct net_device *dev, + struct netlink_ext_ack *extack); + void (*nve_config)(const struct mlxsw_sp_nve *nve, + const struct net_device *dev, + struct mlxsw_sp_nve_config *config); + int (*init)(struct mlxsw_sp_nve *nve, + const struct mlxsw_sp_nve_config *config); + void (*fini)(struct mlxsw_sp_nve *nve); +}; + +extern const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops; +extern const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops; + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c new file mode 100644 index 000000000000..d21c7be5b1c9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. 
All rights reserved */ + +#include <linux/netdevice.h> +#include <linux/netlink.h> +#include <linux/random.h> +#include <net/vxlan.h> + +#include "reg.h" +#include "spectrum_nve.h" + +/* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B) + * + * In the worst case - where we have a VLAN tag on the outer Ethernet + * header and IPv6 in overlay and underlay - we need to parse 128 bytes + */ +#define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128 +#define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96 + +#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS VXLAN_F_UDP_ZERO_CSUM_TX + +static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, + const struct net_device *dev, + struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_config *cfg = &vxlan->cfg; + + if (cfg->saddr.sa.sa_family != AF_INET) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported"); + return false; + } + + if (vxlan_addr_multicast(&cfg->remote_ip)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported"); + return false; + } + + if (vxlan_addr_any(&cfg->saddr)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified"); + return false; + } + + if (cfg->remote_ifindex) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported"); + return false; + } + + if (cfg->port_min || cfg->port_max) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported"); + return false; + } + + if (cfg->tos != 1) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit"); + return false; + } + + if (cfg->flags & VXLAN_F_TTL_INHERIT) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit"); + return false; + } + + if (cfg->flags & VXLAN_F_LEARN) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Learning is not supported"); + return false; + } + + if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported"); + return false; + } + + if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag"); + return false; + } + + if (cfg->ttl == 0) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0"); + return false; + } + + if (cfg->label != 0) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0"); + return false; + } + + return true; +} + +static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, + const struct net_device *dev, + struct mlxsw_sp_nve_config *config) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_config *cfg = &vxlan->cfg; + + config->type = MLXSW_SP_NVE_TYPE_VXLAN; + config->ttl = cfg->ttl; + config->flowlabel = cfg->label; + config->learning_en = cfg->flags & VXLAN_F_LEARN ? 
1 : 0; + config->ul_tb_id = RT_TABLE_MAIN; + config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; + config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; + config->udp_dport = cfg->dst_port; +} + +static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, + unsigned int parsing_depth, + __be16 udp_dport) +{ + char mprs_pl[MLXSW_REG_MPRS_LEN]; + + mlxsw_reg_mprs_pack(mprs_pl, parsing_depth, be16_to_cpu(udp_dport)); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); +} + +static int +mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nve_config *config) +{ + char tngcr_pl[MLXSW_REG_TNGCR_LEN]; + u16 ul_vr_id; + u8 udp_sport; + int err; + + err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id, + &ul_vr_id); + if (err) + return err; + + mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true, + config->ttl); + /* VxLAN driver's default UDP source port range is 32768 (0x8000) + * to 60999 (0xee47). Set the upper 8 bits of the UDP source port + * to a random number between 0x80 and 0xee + */ + get_random_bytes(&udp_sport, sizeof(udp_sport)); + udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80; + mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport); + mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en); + mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id); + mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4)); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); +} + +static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp) +{ + char tngcr_pl[MLXSW_REG_TNGCR_LEN]; + + mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0); + + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); +} + +static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp, + unsigned int tunnel_index) +{ + char rtdp_pl[MLXSW_REG_RTDP_LEN]; + + mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); +} + +static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve, + const struct mlxsw_sp_nve_config *config) +{ + struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; + int err; + + err = mlxsw_sp_nve_parsing_set(mlxsw_sp, + MLXSW_SP_NVE_VXLAN_PARSING_DEPTH, + config->udp_dport); + if (err) + return err; + + err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config); + if (err) + goto err_config_set; + + err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index); + if (err) + goto err_rtdp_set; + + err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id, + config->ul_proto, + &config->ul_sip, + nve->tunnel_index); + if (err) + goto err_promote_decap; + + return 0; + +err_promote_decap: +err_rtdp_set: + mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); +err_config_set: + mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, + config->udp_dport); + return err; +} + +static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve) +{ + struct mlxsw_sp_nve_config *config = &nve->config; + struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; + + mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, + config->ul_proto, &config->ul_sip); + mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); + mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, + config->udp_dport); +} + +const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { + .type = MLXSW_SP_NVE_TYPE_VXLAN, + .can_offload = mlxsw_sp1_nve_vxlan_can_offload, + .nve_config = mlxsw_sp_nve_vxlan_config, + .init 
= mlxsw_sp1_nve_vxlan_init, + .fini = mlxsw_sp1_nve_vxlan_fini, +}; + +static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, + const struct net_device *dev, + struct netlink_ext_ack *extack) +{ + return false; +} + +static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve, + const struct mlxsw_sp_nve_config *config) +{ + return -EOPNOTSUPP; +} + +static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve) +{ +} + +const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = { + .type = MLXSW_SP_NVE_TYPE_VXLAN, + .can_offload = mlxsw_sp2_nve_vxlan_can_offload, + .nve_config = mlxsw_sp_nve_vxlan_config, + .init = mlxsw_sp2_nve_vxlan_init, + .fini = mlxsw_sp2_nve_vxlan_fini, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2ab9cf25a08a..9e9bb57134f2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -366,6 +366,7 @@ enum mlxsw_sp_fib_entry_type { * encapsulating entries.) */ MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP, + MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP, }; struct mlxsw_sp_nexthop_group; @@ -741,6 +742,19 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp, return NULL; } +int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id, + u16 *vr_id) +{ + struct mlxsw_sp_vr *vr; + + vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id); + if (!vr) + return -ESRCH; + *vr_id = vr->id; + + return 0; +} + static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr, enum mlxsw_sp_l3proto proto) { @@ -1128,6 +1142,52 @@ mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); } +static struct mlxsw_sp_fib_entry * +mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id, + enum mlxsw_sp_l3proto proto, + const union mlxsw_sp_l3addr *addr, + enum mlxsw_sp_fib_entry_type type) +{ + struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib_node *fib_node; + unsigned char addr_prefix_len; + struct mlxsw_sp_fib *fib; + struct mlxsw_sp_vr *vr; + const void *addrp; + size_t addr_len; + u32 addr4; + + vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id); + if (!vr) + return NULL; + fib = mlxsw_sp_vr_fib(vr, proto); + + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + addr4 = be32_to_cpu(addr->addr4); + addrp = &addr4; + addr_len = 4; + addr_prefix_len = 32; + break; + case MLXSW_SP_L3_PROTO_IPV6: /* fall through */ + default: + WARN_ON(1); + return NULL; + } + + fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len, + addr_prefix_len); + if (!fib_node || list_empty(&fib_node->entry_list)) + return NULL; + + fib_entry = list_first_entry(&fib_node->entry_list, + struct mlxsw_sp_fib_entry, list); + if (fib_entry->type != type) + return NULL; + + return fib_entry; +} + /* Given an IPIP entry, find the corresponding decap route. 
*/ static struct mlxsw_sp_fib_entry * mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp, @@ -1765,6 +1825,56 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, return 0; } +int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, + enum mlxsw_sp_l3proto ul_proto, + const union mlxsw_sp_l3addr *ul_sip, + u32 tunnel_index) +{ + enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + struct mlxsw_sp_fib_entry *fib_entry; + int err; + + /* It is valid to create a tunnel with a local IP and only later + * assign this IP address to a local interface + */ + fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id, + ul_proto, ul_sip, + type); + if (!fib_entry) + return 0; + + fib_entry->decap.tunnel_index = tunnel_index; + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP; + + err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); + if (err) + goto err_fib_entry_update; + + return 0; + +err_fib_entry_update: + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); + return err; +} + +void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, + enum mlxsw_sp_l3proto ul_proto, + const union mlxsw_sp_l3addr *ul_sip) +{ + enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP; + struct mlxsw_sp_fib_entry *fib_entry; + + fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id, + ul_proto, ul_sip, + type); + if (!fib_entry) + return; + + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); +} + struct mlxsw_sp_neigh_key { struct neighbour *n; }; @@ -3815,6 +3925,7 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: return !!nh_group->nh_rif; case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: + case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP: return true; default: return false; @@ -3848,7 +3959,8 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) int i; if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL || - fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) { + fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP || + fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) { nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; return; } @@ -4072,6 +4184,18 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp, fib_entry->decap.tunnel_index); } +static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; + + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); + mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl, + fib_entry->decap.tunnel_index); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) @@ -4086,6 +4210,8 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, fib_entry, op); + case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP: + return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op); } return -EINVAL; } @@ -4121,6 +4247,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry) { union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) }; + u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id); struct net_device 
*dev = fen_info->fi->fib_dev; struct mlxsw_sp_ipip_entry *ipip_entry; struct fib_info *fi = fen_info->fi; @@ -4135,6 +4262,15 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, fib_entry, ipip_entry); } + if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id, + dip.addr4)) { + u32 t_index; + + t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp); + fib_entry->decap.tunnel_index = t_index; + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP; + return 0; + } /* fall through */ case RTN_BROADCAST: fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 1a60391daafa..3dbafdeaab2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -7,17 +7,6 @@ #include "spectrum.h" #include "reg.h" -enum mlxsw_sp_l3proto { - MLXSW_SP_L3_PROTO_IPV4, - MLXSW_SP_L3_PROTO_IPV6, -#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1) -}; - -union mlxsw_sp_l3addr { - __be32 addr4; - struct in6_addr addr6; -}; - struct mlxsw_sp_rif_ipip_lb; struct mlxsw_sp_rif_ipip_lb_config { enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; @@ -35,8 +24,6 @@ struct mlxsw_sp_neigh_entry; struct mlxsw_sp_nexthop; struct mlxsw_sp_ipip_entry; -struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); @@ -44,9 +31,7 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); -u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); -struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index db715da7bab7..bc60d7a8b49d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -15,9 +15,9 @@ #include <linux/rtnetlink.h> #include <linux/netlink.h> #include <net/switchdev.h> +#include <net/vxlan.h> #include "spectrum_span.h" -#include "spectrum_router.h" #include "spectrum_switchdev.h" #include "spectrum.h" #include "core.h" @@ -84,9 +84,19 @@ struct mlxsw_sp_bridge_ops { void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_bridge_port *bridge_port, struct mlxsw_sp_port *mlxsw_sp_port); + int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, + struct netlink_ext_ack *extack); + void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev); struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device, u16 vid); + struct mlxsw_sp_fid * + (*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device, + u16 vid); + u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device, + const struct mlxsw_sp_fid *fid); }; static int @@ -1237,6 +1247,51 @@ static enum mlxsw_reg_sfd_op 
mlxsw_sp_sfd_op(bool adding) MLXSW_REG_SFD_OP_WRITE_REMOVE; } +static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + enum mlxsw_sp_l3proto proto, + const union mlxsw_sp_l3addr *addr, + bool adding, bool dynamic) +{ + enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto; + char *sfd_pl; + u8 num_rec; + u32 uip; + int err; + + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + uip = be32_to_cpu(addr->addr4); + sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4; + break; + case MLXSW_SP_L3_PROTO_IPV6: /* fall through */ + default: + WARN_ON(1); + return -EOPNOTSUPP; + } + + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0, + mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, + MLXSW_REG_SFD_REC_ACTION_NOP, uip, + sfd_proto); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: + kfree(sfd_pl); + return err; +} + static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, const char *mac, u16 fid, bool adding, enum mlxsw_reg_sfd_rec_action action, @@ -1950,6 +2005,21 @@ mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device, mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); } +static int +mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, + struct netlink_ext_ack *extack) +{ + WARN_ON(1); + return -EINVAL; +} + +static void +mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev) +{ +} + static struct mlxsw_sp_fid * mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device, u16 vid) @@ -1959,10 +2029,29 @@ mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device, return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid); } +static struct mlxsw_sp_fid * +mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device, + u16 vid) +{ + WARN_ON(1); + return NULL; +} + +static u16 +mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device, + const struct mlxsw_sp_fid *fid) +{ + return mlxsw_sp_fid_8021q_vid(fid); +} + static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = { .port_join = mlxsw_sp_bridge_8021q_port_join, .port_leave = mlxsw_sp_bridge_8021q_port_leave, + .vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join, + .vxlan_leave = mlxsw_sp_bridge_8021q_vxlan_leave, .fid_get = mlxsw_sp_bridge_8021q_fid_get, + .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup, + .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, }; static bool @@ -2026,19 +2115,126 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); } +static int +mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); + struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); + struct mlxsw_sp_nve_params params = { + .type = MLXSW_SP_NVE_TYPE_VXLAN, + .vni = vxlan->cfg.vni, + .dev = vxlan_dev, + }; + struct mlxsw_sp_fid *fid; + int err; + + fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex); + if (!fid) + return -EINVAL; + + if (mlxsw_sp_fid_vni_is_set(fid)) 
+ return -EINVAL; + + err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack); + if (err) + goto err_nve_fid_enable; + + /* The tunnel port does not hold a reference on the FID. Only + * local ports and the router port + */ + mlxsw_sp_fid_put(fid); + + return 0; + +err_nve_fid_enable: + mlxsw_sp_fid_put(fid); + return err; +} + +static void +mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); + struct mlxsw_sp_fid *fid; + + fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex); + if (WARN_ON(!fid)) + return; + + /* If the VxLAN device is down, then the FID does not have a VNI */ + if (!mlxsw_sp_fid_vni_is_set(fid)) + goto out; + + mlxsw_sp_nve_fid_disable(mlxsw_sp, fid); +out: + mlxsw_sp_fid_put(fid); +} + static struct mlxsw_sp_fid * mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); + struct net_device *vxlan_dev; + struct mlxsw_sp_fid *fid; + int err; + + fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex); + if (IS_ERR(fid)) + return fid; + + if (mlxsw_sp_fid_vni_is_set(fid)) + return fid; + + vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev); + if (!vxlan_dev) + return fid; + + if (!netif_running(vxlan_dev)) + return fid; + + err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL); + if (err) + goto err_vxlan_join; + + return fid; + +err_vxlan_join: + mlxsw_sp_fid_put(fid); + return ERR_PTR(err); +} + +static struct mlxsw_sp_fid * +mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device, + u16 vid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); + + /* The only valid VLAN for a VLAN-unaware bridge is 0 */ + if (vid) + return NULL; - return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex); + return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex); +} + +static u16 +mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device, + const struct mlxsw_sp_fid *fid) +{ + return 0; } static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = { .port_join = mlxsw_sp_bridge_8021d_port_join, .port_leave = mlxsw_sp_bridge_8021d_port_leave, + .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join, + .vxlan_leave = mlxsw_sp_bridge_8021d_vxlan_leave, .fid_get = mlxsw_sp_bridge_8021d_fid_get, + .fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup, + .fid_vid = mlxsw_sp_bridge_8021d_fid_vid, }; int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, @@ -2088,15 +2284,43 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port); } +int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + const struct net_device *vxlan_dev, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_bridge_device *bridge_device; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (WARN_ON(!bridge_device)) + return -EINVAL; + + return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack); +} + +void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + const struct net_device *vxlan_dev) +{ + struct mlxsw_sp_bridge_device *bridge_device; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (WARN_ON(!bridge_device)) + return; 
+ + bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev); +} + static void mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type, const char *mac, u16 vid, - struct net_device *dev) + struct net_device *dev, bool offloaded) { struct switchdev_notifier_fdb_info info; info.addr = mac; info.vid = vid; + info.offloaded = offloaded; call_switchdev_notifiers(type, dev, &info.info); } @@ -2148,7 +2372,7 @@ do_fdb_op: if (!do_notification) return; type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE; - mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev); + mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding); return; @@ -2208,7 +2432,7 @@ do_fdb_op: if (!do_notification) return; type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE; - mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev); + mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding); return; @@ -2284,12 +2508,127 @@ out: struct mlxsw_sp_switchdev_event_work { struct work_struct work; - struct switchdev_notifier_fdb_info fdb_info; + union { + struct switchdev_notifier_fdb_info fdb_info; + struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info; + }; struct net_device *dev; unsigned long event; }; -static void mlxsw_sp_switchdev_event_work(struct work_struct *work) +static void +mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr, + enum mlxsw_sp_l3proto *proto, + union mlxsw_sp_l3addr *addr) +{ + if (vxlan_addr->sa.sa_family == AF_INET) { + addr->addr4 = vxlan_addr->sin.sin_addr.s_addr; + *proto = MLXSW_SP_L3_PROTO_IPV4; + } else { + addr->addr6 = vxlan_addr->sin6.sin6_addr; + *proto = MLXSW_SP_L3_PROTO_IPV6; + } +} + +static void +mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_switchdev_event_work * + switchdev_work, + struct mlxsw_sp_fid *fid, __be32 vni) +{ + struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info; + struct switchdev_notifier_fdb_info *fdb_info; + struct net_device *dev = switchdev_work->dev; + enum mlxsw_sp_l3proto proto; + union mlxsw_sp_l3addr addr; + int err; + + fdb_info = &switchdev_work->fdb_info; + err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info); + if (err) + return; + + mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip, + &proto, &addr); + + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, + vxlan_fdb_info.eth_addr, + mlxsw_sp_fid_index(fid), + proto, &addr, true, false); + if (err) + return; + vxlan_fdb_info.offloaded = true; + call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, + &vxlan_fdb_info.info); + mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, + vxlan_fdb_info.eth_addr, + fdb_info->vid, dev, true); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, + vxlan_fdb_info.eth_addr, + mlxsw_sp_fid_index(fid), + proto, &addr, false, + false); + vxlan_fdb_info.offloaded = false; + call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, + &vxlan_fdb_info.info); + break; + } +} + +static void +mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work * + switchdev_work) +{ + struct mlxsw_sp_bridge_device *bridge_device; + struct net_device *dev = switchdev_work->dev; + struct net_device *br_dev; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_fid *fid; + __be32 vni; + int err; + + if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE && + switchdev_work->event != 
SWITCHDEV_FDB_DEL_TO_DEVICE) + return; + + if (!switchdev_work->fdb_info.added_by_user) + return; + + if (!netif_running(dev)) + return; + br_dev = netdev_master_upper_dev_get(dev); + if (!br_dev) + return; + if (!netif_is_bridge_master(br_dev)) + return; + mlxsw_sp = mlxsw_sp_lower_get(br_dev); + if (!mlxsw_sp) + return; + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (!bridge_device) + return; + + fid = bridge_device->ops->fid_lookup(bridge_device, + switchdev_work->fdb_info.vid); + if (!fid) + return; + + err = mlxsw_sp_fid_vni(fid, &vni); + if (err) + goto out; + + mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid, + vni); + +out: + mlxsw_sp_fid_put(fid); +} + +static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) { struct mlxsw_sp_switchdev_event_work *switchdev_work = container_of(work, struct mlxsw_sp_switchdev_event_work, work); @@ -2299,6 +2638,11 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work) int err; rtnl_lock(); + if (netif_is_vxlan(dev)) { + mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work); + goto out; + } + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); if (!mlxsw_sp_port) goto out; @@ -2313,7 +2657,7 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work) break; mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, fdb_info->addr, - fdb_info->vid, dev); + fdb_info->vid, dev, true); break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; @@ -2338,22 +2682,213 @@ out: dev_put(dev); } +static void +mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_switchdev_event_work * + switchdev_work) +{ + struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info; + struct mlxsw_sp_bridge_device *bridge_device; + struct net_device *dev = switchdev_work->dev; + u8 all_zeros_mac[ETH_ALEN] = { 0 }; + enum mlxsw_sp_l3proto proto; + union mlxsw_sp_l3addr addr; + struct net_device *br_dev; + struct mlxsw_sp_fid *fid; + u16 vid; + int err; + + vxlan_fdb_info = &switchdev_work->vxlan_fdb_info; + br_dev = netdev_master_upper_dev_get(dev); + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (!bridge_device) + return; + + fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni); + if (!fid) + return; + + mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip, + &proto, &addr); + + if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) { + err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr); + if (err) { + mlxsw_sp_fid_put(fid); + return; + } + vxlan_fdb_info->offloaded = true; + call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, + &vxlan_fdb_info->info); + mlxsw_sp_fid_put(fid); + return; + } + + /* The device has a single FDB table, whereas Linux has two - one + * in the bridge driver and another in the VxLAN driver. 
We only + * program an entry to the device if the MAC points to the VxLAN + * device in the bridge's FDB table + */ + vid = bridge_device->ops->fid_vid(bridge_device, fid); + if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev) + goto err_br_fdb_find; + + err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr, + mlxsw_sp_fid_index(fid), proto, + &addr, true, false); + if (err) + goto err_fdb_tunnel_uc_op; + vxlan_fdb_info->offloaded = true; + call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, + &vxlan_fdb_info->info); + mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, + vxlan_fdb_info->eth_addr, vid, dev, true); + + mlxsw_sp_fid_put(fid); + + return; + +err_fdb_tunnel_uc_op: +err_br_fdb_find: + mlxsw_sp_fid_put(fid); +} + +static void +mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_switchdev_event_work * + switchdev_work) +{ + struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info; + struct mlxsw_sp_bridge_device *bridge_device; + struct net_device *dev = switchdev_work->dev; + struct net_device *br_dev = netdev_master_upper_dev_get(dev); + u8 all_zeros_mac[ETH_ALEN] = { 0 }; + enum mlxsw_sp_l3proto proto; + union mlxsw_sp_l3addr addr; + struct mlxsw_sp_fid *fid; + u16 vid; + + vxlan_fdb_info = &switchdev_work->vxlan_fdb_info; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (!bridge_device) + return; + + fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni); + if (!fid) + return; + + mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip, + &proto, &addr); + + if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) { + mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr); + mlxsw_sp_fid_put(fid); + return; + } + + mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr, + mlxsw_sp_fid_index(fid), proto, &addr, + false, false); + vid = bridge_device->ops->fid_vid(bridge_device, fid); + mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, + vxlan_fdb_info->eth_addr, vid, dev, false); + + mlxsw_sp_fid_put(fid); +} + +static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work) +{ + struct mlxsw_sp_switchdev_event_work *switchdev_work = + container_of(work, struct mlxsw_sp_switchdev_event_work, work); + struct net_device *dev = switchdev_work->dev; + struct mlxsw_sp *mlxsw_sp; + struct net_device *br_dev; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out; + br_dev = netdev_master_upper_dev_get(dev); + if (!br_dev) + goto out; + if (!netif_is_bridge_master(br_dev)) + goto out; + mlxsw_sp = mlxsw_sp_lower_get(br_dev); + if (!mlxsw_sp) + goto out; + + switch (switchdev_work->event) { + case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: + mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work); + break; + case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE: + mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work); + break; + } + +out: + rtnl_unlock(); + kfree(switchdev_work); + dev_put(dev); +} + +static int +mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work * + switchdev_work, + struct switchdev_notifier_info *info) +{ + struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev); + struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info; + struct vxlan_config *cfg = &vxlan->cfg; + + vxlan_fdb_info = container_of(info, + struct switchdev_notifier_vxlan_fdb_info, + info); + + if (vxlan_fdb_info->remote_port != cfg->dst_port) + return -EOPNOTSUPP; + if (vxlan_fdb_info->remote_vni != cfg->vni) + return -EOPNOTSUPP; + if 
(vxlan_fdb_info->vni != cfg->vni) + return -EOPNOTSUPP; + if (vxlan_fdb_info->remote_ifindex) + return -EOPNOTSUPP; + if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) + return -EOPNOTSUPP; + if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) + return -EOPNOTSUPP; + + switchdev_work->vxlan_fdb_info = *vxlan_fdb_info; + + return 0; +} + /* Called under rcu_read_lock() */ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = switchdev_notifier_info_to_dev(ptr); struct mlxsw_sp_switchdev_event_work *switchdev_work; - struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct switchdev_notifier_fdb_info *fdb_info; + struct switchdev_notifier_info *info = ptr; + struct net_device *br_dev; + int err; - if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) + /* Tunnel devices are not our uppers, so check their master instead */ + br_dev = netdev_master_upper_dev_get_rcu(dev); + if (!br_dev) + return NOTIFY_DONE; + if (!netif_is_bridge_master(br_dev)) + return NOTIFY_DONE; + if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev)) return NOTIFY_DONE; switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); if (!switchdev_work) return NOTIFY_BAD; - INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work); switchdev_work->dev = dev; switchdev_work->event = event; @@ -2362,6 +2897,11 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ case SWITCHDEV_FDB_DEL_TO_BRIDGE: + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + INIT_WORK(&switchdev_work->work, + mlxsw_sp_switchdev_bridge_fdb_event_work); memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); @@ -2375,6 +2915,16 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, */ dev_hold(dev); break; + case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */ + case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE: + INIT_WORK(&switchdev_work->work, + mlxsw_sp_switchdev_vxlan_fdb_event_work); + err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work, + info); + if (err) + goto err_vxlan_work_prepare; + dev_hold(dev); + break; default: kfree(switchdev_work); return NOTIFY_DONE; @@ -2384,6 +2934,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, return NOTIFY_DONE; +err_vxlan_work_prepare: err_addr_alloc: kfree(switchdev_work); return NOTIFY_BAD; diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 53020724c2f6..6f18f4d3322a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -24,6 +24,7 @@ enum { MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, MLXSW_TRAP_ID_PKT_SAMPLE = 0x38, MLXSW_TRAP_ID_FID_MISS = 0x3D, + MLXSW_TRAP_ID_DECAP_ECN0 = 0x40, MLXSW_TRAP_ID_ARPBC = 0x50, MLXSW_TRAP_ID_ARPUC = 0x51, MLXSW_TRAP_ID_MTUERROR = 0x52, @@ -59,6 +60,7 @@ enum { MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91, MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1, + MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD, MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_ACL0 = 0x1C0, diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index bd51e057e915..b881f5d4a7f9 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ 
b/drivers/net/ethernet/micrel/ks8695net.c @@ -1164,7 +1164,7 @@ ks8695_timeout(struct net_device *ndev) * sk_buff and adds it to the TX ring. It then kicks the TX DMA * engine to ensure transmission begins. */ -static int +static netdev_tx_t ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ks8695_priv *ksp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 0e9719fbc624..35f8c9ef204d 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1021,9 +1021,9 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) * spin_lock_irqsave is required because tx and rx should be mutual exclusive. * So while tx is in-progress, prevent IRQ interrupt from happenning. */ -static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) { - int retv = NETDEV_TX_OK; + netdev_tx_t retv = NETDEV_TX_OK; struct ks_net *ks = netdev_priv(netdev); disable_irq(netdev->irq); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 001b5f714c1b..867cddba840f 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -999,7 +999,6 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) struct phy_device *phydev; struct net_device *netdev; int ret = -EIO; - u32 mii_adv; netdev = adapter->netdev; phydev = phy_find_first(adapter->mdiobus); @@ -1013,13 +1012,11 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) goto return_error; /* MAC doesn't support 1000T Half */ - phydev->supported &= ~SUPPORTED_1000baseT_Half; + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); /* support both flow controls */ + phy_support_asym_pause(phydev); phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control); - phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); phy->fc_autoneg = phydev->autoneg; phy_start(phydev); diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index ccdf9123f26f..b2109eca81fd 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -977,8 +977,8 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter) lan743x_ptp_disable(adapter); } -void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter, - bool ts_insert_enable) +static void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter, + bool ts_insert_enable) { u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD); diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index 36c84625d54e..bcec0587cf61 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -23,6 +23,8 @@ config MSCC_OCELOT_SWITCH config MSCC_OCELOT_SWITCH_OCELOT tristate "Ocelot switch driver on Ocelot" depends on MSCC_OCELOT_SWITCH + depends on GENERIC_PHY + depends on OF_NET help This driver supports the Ocelot network switch device as present on the Ocelot SoCs. 
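The micrel and lan743x hunks above follow two tree-wide conversions: ndo_start_xmit() implementations return the dedicated netdev_tx_t type instead of int, and open-coded masking of phydev->supported/advertising is replaced with phylib helpers such as phy_remove_link_mode() and phy_support_asym_pause(). The fragment below is only an illustrative sketch of those two patterns, not part of the patch; the foo_* names are invented for the example, while the netdev and phylib symbols are the real in-tree APIs.

/* Illustrative sketch only -- hypothetical driver, not part of any hunk above. */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct foo_priv {
	struct net_device *ndev;
};

static bool foo_hw_queue_frame(struct foo_priv *priv, struct sk_buff *skb)
{
	/* Stand-in for pushing the skb to a TX ring; always "succeeds" here. */
	dev_kfree_skb_any(skb);
	return true;
}

/* ndo_start_xmit() implementations now return netdev_tx_t rather than int. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	if (!foo_hw_queue_frame(priv, skb))
		return NETDEV_TX_BUSY;	/* ring full: the stack will requeue */

	return NETDEV_TX_OK;		/* frame accepted for transmission */
}

/* Open-coded supported/advertising manipulation replaced by phylib helpers. */
static void foo_phy_setup(struct phy_device *phydev)
{
	/* MAC cannot do 1000BASE-T half duplex: drop only that link mode. */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	/* Advertise symmetric and asymmetric pause towards the link partner. */
	phy_support_asym_pause(phydev);
}

Compared with masking raw SUPPORTED_*/ADVERTISED_* bitmaps, the helper calls keep working once phylib moves to extended link-mode bitmaps, which is the motivation for the lan743x and ax88796 changes in this series.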
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 1a4f2bb48ead..3238b9ee42f3 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) { unsigned int val, timeout = 10; - /* Wait for the issued mac table command to be completed, or timeout. - * When the command read from ANA_TABLES_MACACCESS is - * MACACCESS_CMD_IDLE, the issued command completed successfully. + /* Wait for the issued vlan table command to be completed, or timeout. + * When the command read from ANA_TABLES_VLANACCESS is + * VLANACCESS_CMD_IDLE, the issued command completed successfully. */ do { val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS); @@ -472,6 +472,7 @@ static int ocelot_port_open(struct net_device *dev) { struct ocelot_port *port = netdev_priv(dev); struct ocelot *ocelot = port->ocelot; + enum phy_mode phy_mode; int err; /* Enable receiving frames on the port, and activate auto-learning of @@ -482,8 +483,21 @@ static int ocelot_port_open(struct net_device *dev) ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port), ANA_PORT_PORT_CFG, port->chip_port); + if (port->serdes) { + if (port->phy_mode == PHY_INTERFACE_MODE_SGMII) + phy_mode = PHY_MODE_SGMII; + else + phy_mode = PHY_MODE_QSGMII; + + err = phy_set_mode(port->serdes, phy_mode); + if (err) { + netdev_err(dev, "Could not set mode of SerDes\n"); + return err; + } + } + err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link, - PHY_INTERFACE_MODE_NA); + port->phy_mode); if (err) { netdev_err(dev, "Could not attach to PHY\n"); return err; @@ -1606,7 +1620,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, dev->ethtool_ops = &ocelot_ethtool_ops; dev->switchdev_ops = &ocelot_port_switchdev_ops; - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 616bec30dfa3..62c7c8eb00d9 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -11,12 +11,13 @@ #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> +#include <linux/phy.h> +#include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include "ocelot_ana.h" #include "ocelot_dev.h" -#include "ocelot_hsio.h" #include "ocelot_qsys.h" #include "ocelot_rew.h" #include "ocelot_sys.h" @@ -333,79 +334,6 @@ enum ocelot_reg { SYS_CM_DATA_RD, SYS_CM_OP, SYS_CM_DATA, - HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET, - HSIO_PLL5G_CFG1, - HSIO_PLL5G_CFG2, - HSIO_PLL5G_CFG3, - HSIO_PLL5G_CFG4, - HSIO_PLL5G_CFG5, - HSIO_PLL5G_CFG6, - HSIO_PLL5G_STATUS0, - HSIO_PLL5G_STATUS1, - HSIO_PLL5G_BIST_CFG0, - HSIO_PLL5G_BIST_CFG1, - HSIO_PLL5G_BIST_CFG2, - HSIO_PLL5G_BIST_STAT0, - HSIO_PLL5G_BIST_STAT1, - HSIO_RCOMP_CFG0, - HSIO_RCOMP_STATUS, - HSIO_SYNC_ETH_CFG, - HSIO_SYNC_ETH_PLL_CFG, - HSIO_S1G_DES_CFG, - HSIO_S1G_IB_CFG, - HSIO_S1G_OB_CFG, - HSIO_S1G_SER_CFG, - HSIO_S1G_COMMON_CFG, - HSIO_S1G_PLL_CFG, - HSIO_S1G_PLL_STATUS, - HSIO_S1G_DFT_CFG0, - HSIO_S1G_DFT_CFG1, - HSIO_S1G_DFT_CFG2, - HSIO_S1G_TP_CFG, - HSIO_S1G_RC_PLL_BIST_CFG, - HSIO_S1G_MISC_CFG, - HSIO_S1G_DFT_STATUS, - HSIO_S1G_MISC_STATUS, - HSIO_MCB_S1G_ADDR_CFG, - HSIO_S6G_DIG_CFG, - HSIO_S6G_DFT_CFG0, - HSIO_S6G_DFT_CFG1, - HSIO_S6G_DFT_CFG2, - HSIO_S6G_TP_CFG0, - 
HSIO_S6G_TP_CFG1, - HSIO_S6G_RC_PLL_BIST_CFG, - HSIO_S6G_MISC_CFG, - HSIO_S6G_OB_ANEG_CFG, - HSIO_S6G_DFT_STATUS, - HSIO_S6G_ERR_CNT, - HSIO_S6G_MISC_STATUS, - HSIO_S6G_DES_CFG, - HSIO_S6G_IB_CFG, - HSIO_S6G_IB_CFG1, - HSIO_S6G_IB_CFG2, - HSIO_S6G_IB_CFG3, - HSIO_S6G_IB_CFG4, - HSIO_S6G_IB_CFG5, - HSIO_S6G_OB_CFG, - HSIO_S6G_OB_CFG1, - HSIO_S6G_SER_CFG, - HSIO_S6G_COMMON_CFG, - HSIO_S6G_PLL_CFG, - HSIO_S6G_ACJTAG_CFG, - HSIO_S6G_GP_CFG, - HSIO_S6G_IB_STATUS0, - HSIO_S6G_IB_STATUS1, - HSIO_S6G_ACJTAG_STATUS, - HSIO_S6G_PLL_STATUS, - HSIO_S6G_REVID, - HSIO_MCB_S6G_ADDR_CFG, - HSIO_HW_CFG, - HSIO_HW_QSGMII_CFG, - HSIO_HW_QSGMII_STAT, - HSIO_CLK_CFG, - HSIO_TEMP_SENSOR_CTRL, - HSIO_TEMP_SENSOR_CFG, - HSIO_TEMP_SENSOR_STAT, }; enum ocelot_regfield { @@ -527,6 +455,9 @@ struct ocelot_port { u8 vlan_aware; u64 *stats; + + phy_interface_t phy_mode; + struct phy *serdes; }; u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset); diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 3cdf63e35b53..4c23d18bbf44 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -6,9 +6,11 @@ */ #include <linux/interrupt.h> #include <linux/module.h> +#include <linux/of_net.h> #include <linux/netdevice.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> +#include <linux/mfd/syscon.h> #include <linux/skbuff.h> #include "ocelot.h" @@ -126,11 +128,16 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) len += sz; } while (len < buf_len); - /* Read the FCS and discard it */ + /* Read the FCS */ sz = ocelot_rx_frame_word(ocelot, grp, false, &val); /* Update the statistics if part of the FCS was read before */ len -= ETH_FCS_LEN - sz; + if (unlikely(dev->features & NETIF_F_RXFCS)) { + buf = (u32 *)skb_put(skb, ETH_FCS_LEN); + *buf = val; + } + if (sz < 0) { err = sz; break; @@ -168,6 +175,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct device_node *ports, *portnp; struct ocelot *ocelot; + struct regmap *hsio; u32 val; struct { @@ -179,7 +187,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev) { QSYS, "qsys" }, { ANA, "ana" }, { QS, "qs" }, - { HSIO, "hsio" }, }; if (!np && !pdev->dev.platform_data) @@ -202,6 +209,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->targets[res[i].id] = target; } + hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio"); + if (IS_ERR(hsio)) { + dev_err(&pdev->dev, "missing hsio syscon\n"); + return PTR_ERR(hsio); + } + + ocelot->targets[HSIO] = hsio; + err = ocelot_chip_init(ocelot); if (err) return err; @@ -244,18 +259,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev) INIT_LIST_HEAD(&ocelot->multicast); ocelot_init(ocelot); - ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE | - HSIO_HW_CFG_DEV1G_6_MODE | - HSIO_HW_CFG_DEV1G_9_MODE, - HSIO_HW_CFG_DEV1G_4_MODE | - HSIO_HW_CFG_DEV1G_6_MODE | - HSIO_HW_CFG_DEV1G_9_MODE, - HSIO_HW_CFG); - for_each_available_child_of_node(ports, portnp) { struct device_node *phy_node; struct phy_device *phy; struct resource *res; + struct phy *serdes; void __iomem *regs; char res_name[8]; u32 port; @@ -280,10 +288,43 @@ static int mscc_ocelot_probe(struct platform_device *pdev) continue; err = ocelot_probe_port(ocelot, port, regs, phy); - if (err) { - dev_err(&pdev->dev, "failed to probe ports\n"); + if (err) + return err; + + err = of_get_phy_mode(portnp); + if (err < 0) + ocelot->ports[port]->phy_mode = 
PHY_INTERFACE_MODE_NA; + else + ocelot->ports[port]->phy_mode = err; + + switch (ocelot->ports[port]->phy_mode) { + case PHY_INTERFACE_MODE_NA: + continue; + case PHY_INTERFACE_MODE_SGMII: + break; + case PHY_INTERFACE_MODE_QSGMII: + break; + default: + dev_err(ocelot->dev, + "invalid phy mode for port%d, (Q)SGMII only\n", + port); + return -EINVAL; + } + + serdes = devm_of_phy_get(ocelot->dev, portnp, NULL); + if (IS_ERR(serdes)) { + err = PTR_ERR(serdes); + if (err == -EPROBE_DEFER) + dev_dbg(ocelot->dev, "deferring probe\n"); + else + dev_err(ocelot->dev, + "missing SerDes phys for port%d\n", + port); + goto err_probe_ports; } + + ocelot->ports[port]->serdes = serdes; } register_netdevice_notifier(&ocelot_netdevice_nb); diff --git a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h deleted file mode 100644 index 6aa40ea223a2..000000000000 --- a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h +++ /dev/null @@ -1,154 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ -/* - * Microsemi Ocelot Switch driver - * - * Copyright (c) 2017 Microsemi Corporation - */ - -#ifndef _MSCC_OCELOT_DEV_GMII_H_ -#define _MSCC_OCELOT_DEV_GMII_H_ - -#define DEV_GMII_PORT_MODE_CLOCK_CFG 0x0 - -#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_TX_RST BIT(5) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_RX_RST BIT(4) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_PORT_RST BIT(3) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_PHY_RST BIT(2) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0)) -#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0) - -#define DEV_GMII_PORT_MODE_PORT_MISC 0x4 - -#define DEV_GMII_PORT_MODE_PORT_MISC_MPLS_RX_ENA BIT(5) -#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_ERROR_ENA BIT(4) -#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_PAUSE_ENA BIT(3) -#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_CTRL_ENA BIT(2) -#define DEV_GMII_PORT_MODE_PORT_MISC_GMII_LOOP_ENA BIT(1) -#define DEV_GMII_PORT_MODE_PORT_MISC_DEV_LOOP_ENA BIT(0) - -#define DEV_GMII_PORT_MODE_EVENTS 0x8 - -#define DEV_GMII_PORT_MODE_EEE_CFG 0xc - -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_ENA BIT(22) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15)) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8)) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1)) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1) -#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1) -#define DEV_GMII_PORT_MODE_EEE_CFG_PORT_LPI BIT(0) - -#define DEV_GMII_PORT_MODE_RX_PATH_DELAY 0x10 - -#define DEV_GMII_PORT_MODE_TX_PATH_DELAY 0x14 - -#define DEV_GMII_PORT_MODE_PTP_PREDICT_CFG 0x18 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG 0x1c - -#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_RX_ENA BIT(4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_TX_ENA BIT(0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG 0x20 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FDX_ENA BIT(0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_MAXLEN_CFG 0x24 - -#define 
DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG 0x28 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_PB_ENA BIT(1) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) -#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG 0x2c - -#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG 0x30 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG 0x34 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_OB_ENA BIT(25) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_WEXC_DIS BIT(24) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_M GENMASK(23, 16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_LOAD BIT(12) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0)) -#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG 0x38 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_TBI_MODE BIT(4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0) - -#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_LOW_CFG 0x3c - -#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_HIGH_CFG 0x40 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY 0x44 - -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_JUNK_STICKY BIT(5) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_JAM_STICKY BIT(3) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1) -#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_ABORT_STICKY BIT(0) - -#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG 0x48 - -#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0) -#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4) -#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8) - -#define 
DEV_GMII_MM_CONFIG_VERIF_CONFIG 0x4c - -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4)) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 12)) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12) -#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12) - -#define DEV_GMII_MM_STATISTICS_MM_STATUS 0x50 - -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8)) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_RX_FRAME_STATUS BIT(20) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_FRAME_STATUS BIT(24) -#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28) - -#endif diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/drivers/net/ethernet/mscc/ocelot_hsio.h deleted file mode 100644 index d93ddec3931b..000000000000 --- a/drivers/net/ethernet/mscc/ocelot_hsio.h +++ /dev/null @@ -1,785 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ -/* - * Microsemi Ocelot Switch driver - * - * Copyright (c) 2017 Microsemi Corporation - */ - -#ifndef _MSCC_OCELOT_HSIO_H_ -#define _MSCC_OCELOT_HSIO_H_ - -#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31) -#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30) -#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29) -#define HSIO_PLL5G_CFG0_DIV4 BIT(28) -#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27) -#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23)) -#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23) -#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23) -#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18)) -#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18) -#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18) -#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16)) -#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16) -#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16) -#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15) -#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14) -#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13) -#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12) -#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6)) -#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6) -#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6) -#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0)) -#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0) - -#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18) -#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17) -#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16) -#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15) -#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14) -#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6)) -#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6) -#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) 
(((x) & GENMASK(13, 6)) >> 6) -#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5) -#define HSIO_PLL5G_CFG1_PWD_TX BIT(4) -#define HSIO_PLL5G_CFG1_PWD_RX BIT(3) -#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2) -#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1) -#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0) - -#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30) -#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29) -#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28) -#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27) -#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26) -#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25) -#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24) -#define HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16)) -#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16) -#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16) -#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15) -#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14) -#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13) -#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12) -#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11) -#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10) -#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5)) -#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5) -#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5) -#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4) -#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3) -#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2) -#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1) -#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0) - -#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22)) -#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22) -#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22) -#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19)) -#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19) -#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19) -#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18) -#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17) -#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16) -#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15) -#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14) -#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13) -#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12) -#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11) -#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10) -#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9) -#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8) -#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0)) -#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0) - -#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) -#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16) -#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) -#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0)) -#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0) - -#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) -#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16) -#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) -#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0)) -#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0) - -#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23) -#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20)) -#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20) -#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20) -#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19) -#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16)) 
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16) -#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16) -#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8)) -#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8) -#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8) -#define HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7) -#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6) -#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0)) -#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0) - -#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12) -#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11) -#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10) -#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9) -#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1)) -#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1) -#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1) -#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0) - -#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21)) -#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21) -#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21) -#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16)) -#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16) -#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16) -#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4)) -#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4) -#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4) -#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1)) -#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1) -#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1) -#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0) - -#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST BIT(31) -#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20)) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16)) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16) -#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0)) -#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0) - -#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4) -#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2) -#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1) -#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0) - -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16)) -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16) -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16) -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0)) -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0) - -#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13) -#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12) -#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10)) -#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10) -#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 
10) -#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8)) -#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8) -#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8) -#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4) -#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0)) -#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0) - -#define HSIO_RCOMP_STATUS_BUSY BIT(12) -#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7) -#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0)) -#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0) - -#define HSIO_SYNC_ETH_CFG_RSZ 0x4 - -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1)) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1) -#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0) - -#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0) - -#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13)) -#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13) -#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) -#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11)) -#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11) -#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11) -#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8)) -#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8) -#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8) -#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5)) -#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5) -#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5) -#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4) -#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1)) -#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1) -#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1) -#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0) - -#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27) -#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24)) -#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24) -#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24) -#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19)) -#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19) -#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19) -#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14) -#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13) -#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12) -#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11) -#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10) -#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9) -#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6)) -#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6) -#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6) -#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4)) -#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4) -#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4) -#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) -#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0) - -#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17)) 
-#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17) -#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17) -#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13)) -#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13) -#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) -#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10)) -#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10) -#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10) -#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9) -#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8) -#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4) -#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) -#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0) - -#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9) -#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8) -#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7) -#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6) -#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4)) -#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4) -#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4) -#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3) -#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2) -#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1) -#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0) - -#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31) -#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21) -#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18) -#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17) -#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16) -#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13)) -#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13) -#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13) -#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12) -#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11) -#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10) -#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9) -#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8) -#define HSIO_S1G_COMMON_CFG_HRATE BIT(7) -#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0) - -#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22) -#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21) -#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8)) -#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8) -#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8) -#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7) -#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6) -#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5) -#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3) - -#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12) -#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11) -#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10) -#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0)) -#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0) - -#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31) -#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23) -#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) -#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20) -#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20) -#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16)) -#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16) -#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) 
>> 16) -#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4) -#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3) -#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2) -#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0) - -#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) -#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8) -#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) -#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4) -#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3) -#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2) -#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1) -#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0) - -#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) -#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8) -#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) -#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4) -#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3) -#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2) -#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1) -#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0) - -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16)) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8)) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0)) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0) - -#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11)) -#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11) -#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11) -#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10) -#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9) -#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8) -#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5) -#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4) -#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3) -#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2) -#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0) - -#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7) -#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6) -#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5) -#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3) -#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2) -#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1) -#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0) - -#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0) - -#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31) -#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30) -#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0)) -#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0) - -#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16)) -#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16) -#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 
16)) >> 16) -#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7) -#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6) -#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3)) -#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3) -#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3) -#define HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0)) -#define HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0) - -#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31) -#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23) -#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) -#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20) -#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20) -#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16)) -#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16) -#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16) -#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4) -#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3) -#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2) -#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0) - -#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) -#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8) -#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) -#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4) -#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3) -#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2) -#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1) -#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0) - -#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) -#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8) -#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) -#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4) -#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3) -#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2) -#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1) -#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0) - -#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20) -#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16)) -#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16) -#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16) -#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8)) -#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8) -#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8) -#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0)) -#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0) - -#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13)) -#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13) -#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13) -#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11)) -#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11) -#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11) -#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10) -#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9) -#define 
HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8) -#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7) -#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6) -#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5) -#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4) -#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3) -#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2) -#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0) - -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23)) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18)) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13)) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6)) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0)) -#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0) - -#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8) -#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7) -#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6) -#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5) -#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3) -#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2) -#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1) -#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0) - -#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0) - -#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13)) -#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13) -#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) -#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10)) -#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10) -#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10) -#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8)) -#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8) -#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8) -#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5)) -#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5) -#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5) -#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4) -#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1)) -#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1) -#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1) -#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0) - -#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29)) -#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29) -#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29) -#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28) -#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24)) -#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24) -#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24) -#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20)) -#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20) -#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) 
& GENMASK(23, 20)) >> 20) -#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18)) -#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18) -#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18) -#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) & GENMASK(17, 15)) -#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15) -#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13)) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11)) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9)) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7)) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7) -#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7) -#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6) -#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5) -#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4) -#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3) -#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2) -#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1) -#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0) - -#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17)) -#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17) -#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17) -#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12)) -#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12) -#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12) -#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8)) -#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8) -#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8) -#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7) -#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6) -#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5) -#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4) -#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3) -#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2) -#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1) -#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0) - -#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27)) -#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27) -#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27) -#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22)) -#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22) -#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22) -#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19)) -#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19) -#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19) -#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16)) -#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16) -#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16) -#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10)) -#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10) -#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 
10)) >> 10) -#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5)) -#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5) -#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5) -#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3)) -#define HSIO_S6G_IB_CFG2_IB_UMAX_M GENMASK(4, 3) -#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3) -#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0)) -#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0) - -#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18)) -#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18) -#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) -#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12)) -#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12) -#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) -#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6)) -#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6) -#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) -#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0)) -#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0) - -#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18)) -#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18) -#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) -#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12)) -#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12) -#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) -#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6)) -#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6) -#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) -#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) & GENMASK(5, 0)) -#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0) - -#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18)) -#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18) -#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) -#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12)) -#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12) -#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) -#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6)) -#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6) -#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) -#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0)) -#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0) - -#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31) -#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30) -#define HSIO_S6G_OB_CFG_OB_POL BIT(29) -#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23)) -#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23) -#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23) -#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18)) -#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18) -#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18) -#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17) -#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16) -#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11)) -#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11) -#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11) -#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10) -#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9) -#define HSIO_S6G_OB_CFG_OB_SR_H 
BIT(8) -#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4) -#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) -#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0) - -#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6)) -#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6) -#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6) -#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0)) -#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0) - -#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8) -#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7) -#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6) -#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4)) -#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4) -#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4) -#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3) -#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2) -#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1) -#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0) - -#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17) -#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16) -#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15) -#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14) -#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13) -#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12) -#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9)) -#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9) -#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9) -#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8) -#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7) -#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6) -#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5) -#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4) -#define HSIO_S6G_COMMON_CFG_HRATE BIT(3) -#define HSIO_S6G_COMMON_CFG_QRATE BIT(2) -#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0)) -#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0) - -#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16)) -#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16) -#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16) -#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15) -#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14) -#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6)) -#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6) -#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6) -#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5) -#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4) -#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3) -#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2) -#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1) -#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0) - -#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5) -#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4) -#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3) -#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2) -#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1) -#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0) - -#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16)) -#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16) -#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16) -#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0)) -#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0) - -#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8) -#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7) 
-#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6) -#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5) -#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4) -#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3) -#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2) -#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1) -#define HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0) - -#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18)) -#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18) -#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18) -#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12)) -#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12) -#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12) -#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6)) -#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6) -#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6) -#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0)) -#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0) - -#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2) -#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1) -#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0) - -#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10) -#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9) -#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8) -#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0)) -#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0) - -#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26)) -#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26) -#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26) -#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & GENMASK(25, 21)) -#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21) -#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21) -#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16)) -#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16) -#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16) -#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10)) -#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10) -#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10) -#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5)) -#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5) -#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5) -#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0)) -#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0) - -#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31) -#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30) -#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0)) -#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0) - -#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6) -#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5) -#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4) -#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3) -#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2) -#define HSIO_HW_CFG_PCIE_ENA BIT(1) -#define HSIO_HW_CFG_QSGMII_ENA BIT(0) - -#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3) -#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2) -#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1) -#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0) - -#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1)) -#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1) -#define 
HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1) -#define HSIO_HW_QSGMII_STAT_SYNC BIT(0) - -#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1)) -#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1) -#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1) -#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0) - -#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5) -#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4) -#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3) -#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2) -#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1) -#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0) - -#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8)) -#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8) -#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8) -#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0)) -#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0) - -#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8) -#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0)) -#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0) - -#endif diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c index e334b406c40c..9271af18b93b 100644 --- a/drivers/net/ethernet/mscc/ocelot_regs.c +++ b/drivers/net/ethernet/mscc/ocelot_regs.c @@ -5,6 +5,7 @@ * Copyright (c) 2017 Microsemi Corporation */ #include "ocelot.h" +#include <soc/mscc/ocelot_hsio.h> static const u32 ocelot_ana_regmap[] = { REG(ANA_ADVLEARN, 0x009000), @@ -102,82 +103,6 @@ static const u32 ocelot_qs_regmap[] = { REG(QS_INH_DBG, 0x000048), }; -static const u32 ocelot_hsio_regmap[] = { - REG(HSIO_PLL5G_CFG0, 0x000000), - REG(HSIO_PLL5G_CFG1, 0x000004), - REG(HSIO_PLL5G_CFG2, 0x000008), - REG(HSIO_PLL5G_CFG3, 0x00000c), - REG(HSIO_PLL5G_CFG4, 0x000010), - REG(HSIO_PLL5G_CFG5, 0x000014), - REG(HSIO_PLL5G_CFG6, 0x000018), - REG(HSIO_PLL5G_STATUS0, 0x00001c), - REG(HSIO_PLL5G_STATUS1, 0x000020), - REG(HSIO_PLL5G_BIST_CFG0, 0x000024), - REG(HSIO_PLL5G_BIST_CFG1, 0x000028), - REG(HSIO_PLL5G_BIST_CFG2, 0x00002c), - REG(HSIO_PLL5G_BIST_STAT0, 0x000030), - REG(HSIO_PLL5G_BIST_STAT1, 0x000034), - REG(HSIO_RCOMP_CFG0, 0x000038), - REG(HSIO_RCOMP_STATUS, 0x00003c), - REG(HSIO_SYNC_ETH_CFG, 0x000040), - REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048), - REG(HSIO_S1G_DES_CFG, 0x00004c), - REG(HSIO_S1G_IB_CFG, 0x000050), - REG(HSIO_S1G_OB_CFG, 0x000054), - REG(HSIO_S1G_SER_CFG, 0x000058), - REG(HSIO_S1G_COMMON_CFG, 0x00005c), - REG(HSIO_S1G_PLL_CFG, 0x000060), - REG(HSIO_S1G_PLL_STATUS, 0x000064), - REG(HSIO_S1G_DFT_CFG0, 0x000068), - REG(HSIO_S1G_DFT_CFG1, 0x00006c), - REG(HSIO_S1G_DFT_CFG2, 0x000070), - REG(HSIO_S1G_TP_CFG, 0x000074), - REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078), - REG(HSIO_S1G_MISC_CFG, 0x00007c), - REG(HSIO_S1G_DFT_STATUS, 0x000080), - REG(HSIO_S1G_MISC_STATUS, 0x000084), - REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088), - REG(HSIO_S6G_DIG_CFG, 0x00008c), - REG(HSIO_S6G_DFT_CFG0, 0x000090), - REG(HSIO_S6G_DFT_CFG1, 0x000094), - REG(HSIO_S6G_DFT_CFG2, 0x000098), - REG(HSIO_S6G_TP_CFG0, 0x00009c), - REG(HSIO_S6G_TP_CFG1, 0x0000a0), - REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4), - REG(HSIO_S6G_MISC_CFG, 0x0000a8), - REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac), - REG(HSIO_S6G_DFT_STATUS, 0x0000b0), - REG(HSIO_S6G_ERR_CNT, 0x0000b4), - REG(HSIO_S6G_MISC_STATUS, 0x0000b8), - REG(HSIO_S6G_DES_CFG, 0x0000bc), - REG(HSIO_S6G_IB_CFG, 0x0000c0), - REG(HSIO_S6G_IB_CFG1, 0x0000c4), - REG(HSIO_S6G_IB_CFG2, 0x0000c8), - REG(HSIO_S6G_IB_CFG3, 0x0000cc), - 
REG(HSIO_S6G_IB_CFG4, 0x0000d0), - REG(HSIO_S6G_IB_CFG5, 0x0000d4), - REG(HSIO_S6G_OB_CFG, 0x0000d8), - REG(HSIO_S6G_OB_CFG1, 0x0000dc), - REG(HSIO_S6G_SER_CFG, 0x0000e0), - REG(HSIO_S6G_COMMON_CFG, 0x0000e4), - REG(HSIO_S6G_PLL_CFG, 0x0000e8), - REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec), - REG(HSIO_S6G_GP_CFG, 0x0000f0), - REG(HSIO_S6G_IB_STATUS0, 0x0000f4), - REG(HSIO_S6G_IB_STATUS1, 0x0000f8), - REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc), - REG(HSIO_S6G_PLL_STATUS, 0x000100), - REG(HSIO_S6G_REVID, 0x000104), - REG(HSIO_MCB_S6G_ADDR_CFG, 0x000108), - REG(HSIO_HW_CFG, 0x00010c), - REG(HSIO_HW_QSGMII_CFG, 0x000110), - REG(HSIO_HW_QSGMII_STAT, 0x000114), - REG(HSIO_CLK_CFG, 0x000118), - REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c), - REG(HSIO_TEMP_SENSOR_CFG, 0x000120), - REG(HSIO_TEMP_SENSOR_STAT, 0x000124), -}; - static const u32 ocelot_qsys_regmap[] = { REG(QSYS_PORT_MODE, 0x011200), REG(QSYS_SWITCH_PORT_MODE, 0x011234), @@ -302,7 +227,6 @@ static const u32 ocelot_sys_regmap[] = { static const u32 *ocelot_regmap[] = { [ANA] = ocelot_ana_regmap, [QS] = ocelot_qs_regmap, - [HSIO] = ocelot_hsio_regmap, [QSYS] = ocelot_qsys_regmap, [REW] = ocelot_rew_regmap, [SYS] = ocelot_sys_regmap, @@ -453,9 +377,11 @@ static void ocelot_pll5_init(struct ocelot *ocelot) /* Configure PLL5. This will need a proper CCF driver * The values are coming from the VTSS API for Ocelot */ - ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) | - HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4); - ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) | + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4, + HSIO_PLL5G_CFG4_IB_CTRL(0x7600) | + HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8)); + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0, + HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) | HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) | HSIO_PLL5G_CFG0_ENA_BIAS | HSIO_PLL5G_CFG0_ENA_VCO_BUF | @@ -465,13 +391,14 @@ static void ocelot_pll5_init(struct ocelot *ocelot) HSIO_PLL5G_CFG0_SELBGV820(4) | HSIO_PLL5G_CFG0_DIV4 | HSIO_PLL5G_CFG0_ENA_CLKTREE | - HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0); - ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET | + HSIO_PLL5G_CFG0_ENA_LANE); + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2, + HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET | HSIO_PLL5G_CFG2_EN_RESET_OVERRUN | HSIO_PLL5G_CFG2_GAIN_TEST(0x8) | HSIO_PLL5G_CFG2_ENA_AMPCTRL | HSIO_PLL5G_CFG2_PWD_AMPCTRL_N | - HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2); + HSIO_PLL5G_CFG2_AMPC_SEL(0x10)); } int ocelot_chip_init(struct ocelot *ocelot) diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index b8983e73265a..82be90075695 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -75,6 +75,7 @@ #include <linux/tcp.h> #include <linux/uaccess.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/slab.h> #include <linux/prefetch.h> #include <net/tcp.h> @@ -491,7 +492,7 @@ static struct pci_driver s2io_driver = { }; /* A simplifier macro used both by init and free shared_mem Fns(). 
*/ -#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each) +#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each) /* netqueue manipulation helper functions */ static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp) @@ -3679,11 +3680,9 @@ static void restore_xmsi_data(struct s2io_nic *nic) writeq(nic->msix_info[i].data, &bar0->xmsi_data); val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); writeq(val64, &bar0->xmsi_access); - if (wait_for_msix_trans(nic, msix_index)) { + if (wait_for_msix_trans(nic, msix_index)) DBG_PRINT(ERR_DBG, "%s: index: %d failed\n", __func__, msix_index); - continue; - } } } diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index 1a24a7218794..0a921f30f98f 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -10,6 +10,7 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. ************************************************************************/ +#include <linux/io-64-nonatomic-lo-hi.h> #ifndef _S2IO_H #define _S2IO_H @@ -970,27 +971,6 @@ struct s2io_nic { #define RESET_ERROR 1 #define CMD_ERROR 2 -/* OS related system calls */ -#ifndef readq -static inline u64 readq(void __iomem *addr) -{ - u64 ret = 0; - ret = readl(addr + 4); - ret <<= 32; - ret |= readl(addr); - - return ret; -} -#endif - -#ifndef writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel((u32) (val), addr); - writel((u32) (val >> 32), (addr + 4)); -} -#endif - /* * Some registers have to be written in a particular order to * expect correct hardware operation. The macro SPECIAL_REG_WRITE diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 398011c87643..4c1fb7e57888 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -13,6 +13,7 @@ ******************************************************************************/ #include <linux/vmalloc.h> #include <linux/etherdevice.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/pci.h> #include <linux/slab.h> diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index d743a37a3cee..e678ba379598 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -2011,26 +2011,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set( void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); -#ifndef readq -static inline u64 readq(void __iomem *addr) -{ - u64 ret = 0; - ret = readl(addr + 4); - ret <<= 32; - ret |= readl(addr); - - return ret; -} -#endif - -#ifndef writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel((u32) (val), addr); - writel((u32) (val >> 32), (addr + 4)); -} -#endif - static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr) { writel(val, addr + 4); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c index 0c3b5dea2858..f7a0d1d5885e 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c @@ -12,6 +12,7 @@ * Copyright(c) 2002-2010 Exar Corp. 
******************************************************************************/ #include <linux/etherdevice.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/prefetch.h> #include "vxge-traffic.h" @@ -2261,7 +2262,7 @@ void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id) { struct __vxge_hw_device *hldev = vp->vpath->hldev; - if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)) + if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) __vxge_hw_pio_mem_write32_upper( (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]); diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index b157ccd8c80f..3c661f422688 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -1,36 +1,5 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. 
*/ #include <linux/kernel.h> @@ -55,30 +24,21 @@ #define NFP_QMSTAT_DROP 16 #define NFP_QMSTAT_ECN 24 -static unsigned long long -nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue) -{ - return alink->abm->q_lvls->addr + - (alink->queue_base + queue) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; -} - static int nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, unsigned int stride, unsigned int offset, unsigned int i, bool is_u64, u64 *res) { struct nfp_cpp *cpp = alink->abm->app->cpp; - u32 val32, mur; - u64 val, addr; + u64 val, sym_offset; + u32 val32; int err; - mur = NFP_CPP_ATOMIC_RD(sym->target, sym->domain); - - addr = sym->addr + (alink->queue_base + i) * stride + offset; + sym_offset = (alink->queue_base + i) * stride + offset; if (is_u64) - err = nfp_cpp_readq(cpp, mur, addr, &val); + err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val); else - err = nfp_cpp_readl(cpp, mur, addr, &val32); + err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32); if (err) { nfp_err(cpp, "RED offload reading stat failed on vNIC %d queue %d\n", @@ -114,13 +74,12 @@ nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) { struct nfp_cpp *cpp = alink->abm->app->cpp; - u32 muw; + u64 sym_offset; int err; - muw = NFP_CPP_ATOMIC_WR(alink->abm->q_lvls->target, - alink->abm->q_lvls->domain); - - err = nfp_cpp_writel(cpp, muw, nfp_abm_q_lvl_thrs(alink, i), val); + sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; + err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0, + sym_offset, val); if (err) { nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n", alink->id, i); @@ -290,10 +249,10 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size) nfp_err(pf->cpp, "Symbol '%s' not found\n", name); return ERR_PTR(-ENOENT); } - if (sym->size != size) { + if (nfp_rtsym_size(sym) != size) { nfp_err(pf->cpp, "Symbol '%s' wrong size: expected %u got %llu\n", - name, size, sym->size); + name, size, nfp_rtsym_size(sym)); return ERR_PTR(-EINVAL); } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index b84a6c2d387b..c0830c0c2c3f 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -1,36 +1,5 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/etherdevice.h> @@ -540,8 +509,9 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, { struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[id]; u8 mac_addr[ETH_ALEN]; - const char *mac_str; - char name[32]; + struct nfp_nsp *nsp; + char hwinfo[32]; + int err; if (id > pf->eth_tbl->count) { nfp_warn(pf->cpp, "No entry for persistent MAC address\n"); @@ -549,22 +519,37 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, return; } - snprintf(name, sizeof(name), "eth%u.mac.pf%u", + snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac.pf%u", eth_port->eth_index, abm->pf_id); - mac_str = nfp_hwinfo_lookup(pf->hwinfo, name); - if (!mac_str) { - nfp_warn(pf->cpp, "Can't lookup persistent MAC address (%s)\n", - name); + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + nfp_warn(pf->cpp, "Failed to access the NSP for persistent MAC address: %ld\n", + PTR_ERR(nsp)); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + if (!nfp_nsp_has_hwinfo_lookup(nsp)) { + nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n"); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo)); + nfp_nsp_close(nsp); + if (err) { + nfp_warn(pf->cpp, "Reading persistent MAC address failed: %d\n", + err); eth_hw_addr_random(nn->dp.netdev); return; } - if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", + if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", &mac_addr[0], &mac_addr[1], &mac_addr[2], &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { nfp_warn(pf->cpp, "Can't parse persistent MAC address (%s)\n", - mac_str); + hwinfo); eth_hw_addr_random(nn->dp.netdev); return; } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 934a70835473..f907b7d98917 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -1,36 +1,5 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2018 Netronome Systems, Inc. */ #ifndef __NFP_ABM_H__ #define __NFP_ABM_H__ 1 diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 2572a4b91c7c..9b6cfa697879 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include <linux/bpf.h> #include <linux/bitops.h> @@ -89,15 +59,32 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size) return skb; } +static unsigned int +nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n) +{ + unsigned int size; + + size = sizeof(struct cmsg_req_map_op); + size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n; + + return size; +} + static struct sk_buff * nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n) { + return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n)); +} + +static unsigned int +nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n) +{ unsigned int size; - size = sizeof(struct cmsg_req_map_op); - size += sizeof(struct cmsg_key_value_pair) * n; + size = sizeof(struct cmsg_reply_map_op); + size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n; - return nfp_bpf_cmsg_alloc(bpf, size); + return size; } static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb) @@ -338,6 +325,34 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map) dev_consume_skb_any(skb); } +static void * +nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req, + unsigned int n) +{ + return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n]; +} + +static void * +nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req, + unsigned int n) +{ + return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n]; +} + +static void * +nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply, + unsigned int n) +{ + return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n]; +} + +static void * +nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply, + unsigned int n) +{ + return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n]; +} + static int nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_bpf_cmsg_type op, @@ -366,12 +381,13 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, /* Copy inputs */ if (key) - memcpy(&req->elem[0].key, key, map->key_size); + memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size); if (value) - memcpy(&req->elem[0].value, value, map->value_size); + memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value, + map->value_size); skb = nfp_bpf_cmsg_communicate(bpf, skb, op, - sizeof(*reply) + sizeof(*reply->elem)); + nfp_bpf_cmsg_map_reply_size(bpf, 1)); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -382,9 +398,11 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, /* Copy outputs */ if (out_key) - memcpy(out_key, &reply->elem[0].key, map->key_size); + memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0), + map->key_size); if (out_value) - memcpy(out_value, &reply->elem[0].value, map->value_size); + memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0), + map->value_size); dev_consume_skb_any(skb); @@ -428,6 +446,13 @@ int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, key, NULL, 0, next_key, NULL); } +unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf) +{ + return max3((unsigned int)NFP_NET_DEFAULT_MTU, + nfp_bpf_cmsg_map_req_size(bpf, 1), + nfp_bpf_cmsg_map_reply_size(bpf, 1)); +} + void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) { struct nfp_app_bpf *bpf = app->priv; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index e4f9b7ec8528..721921bcf120 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -1,35 
+1,5 @@ -/* - * Copyright (C) 2017-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef NFP_BPF_FW_H #define NFP_BPF_FW_H 1 @@ -52,6 +22,7 @@ enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_RANDOM = 4, NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5, NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6, + NFP_BPF_CAP_TYPE_ABI_VERSION = 7, }; struct nfp_bpf_cap_tlv_func { @@ -98,6 +69,7 @@ enum nfp_bpf_cmsg_type { #define CMSG_TYPE_MAP_REPLY_BIT 7 #define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req)) +/* BPF ABIv2 fixed-length control message fields */ #define CMSG_MAP_KEY_LW 16 #define CMSG_MAP_VALUE_LW 16 @@ -147,24 +119,19 @@ struct cmsg_reply_map_free_tbl { __be32 count; }; -struct cmsg_key_value_pair { - __be32 key[CMSG_MAP_KEY_LW]; - __be32 value[CMSG_MAP_VALUE_LW]; -}; - struct cmsg_req_map_op { struct cmsg_hdr hdr; __be32 tid; __be32 count; __be32 flags; - struct cmsg_key_value_pair elem[0]; + u8 data[0]; }; struct cmsg_reply_map_op { struct cmsg_reply_map_simple reply_hdr; __be32 count; __be32 resv; - struct cmsg_key_value_pair elem[0]; + u8 data[0]; }; struct cmsg_bpf_event { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index eff57f7d056a..97d33bb4d84d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #define pr_fmt(fmt) "NFP net bpf: " fmt @@ -267,6 +237,38 @@ emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer) } static void +__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, + u8 defer, bool dst_lmextn, bool src_lmextn) +{ + u64 insn; + + insn = OP_BR_ALU_BASE | + FIELD_PREP(OP_BR_ALU_A_SRC, areg) | + FIELD_PREP(OP_BR_ALU_B_SRC, breg) | + FIELD_PREP(OP_BR_ALU_DEFBR, defer) | + FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) | + FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn); + + nfp_prog_push(nfp_prog, insn); +} + +static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer) +{ + struct nfp_insn_ur_regs reg; + int err; + + err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg); + if (err) { + nfp_prog->error = err; + return; + } + + __emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn, + reg.src_lmextn); +} + +static void __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, enum immed_width width, bool invert, enum immed_shift shift, bool wr_both, @@ -1137,7 +1139,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr, bool clr_gpr, lmem_step step) { - s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off; + s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; bool first = true, last; bool needs_inc = false; swreg stack_off_reg; @@ -1146,7 +1148,8 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool lm3 = true; int ret; - if (meta->ptr_not_const) { + if (meta->ptr_not_const || + meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) { /* Use of the last encountered ptr_off is OK, they all have * the same alignment. Depend on low bits of value being * discarded when written to LMaddr register.
@@ -1695,7 +1698,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) s64 lm_off; /* We only have to reload LM0 if the key is not at start of stack */ - lm_off = nfp_prog->stack_depth; + lm_off = nfp_prog->stack_frame_depth; lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off; load_lm_ptr = meta->arg2.var_off || lm_off; @@ -1808,10 +1811,10 @@ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) swreg stack_depth_reg; stack_depth_reg = ur_load_imm_any(nfp_prog, - nfp_prog->stack_depth, + nfp_prog->stack_frame_depth, stack_imm(nfp_prog)); - emit_alu(nfp_prog, reg_both(dst), - stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg); + emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog), + ALU_OP_ADD, stack_depth_reg); wrp_immed(nfp_prog, reg_both(dst + 1), 0); } else { wrp_reg_mov(nfp_prog, dst, src); @@ -3081,7 +3084,93 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); } -static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +static int +bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + u32 ret_tgt, stack_depth, offset_br; + swreg tmp_reg; + + stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN); + /* Space for saving the return address is accounted for by the callee, + * so stack_depth can be zero for the main function. + */ + if (stack_depth) { + tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, + stack_imm(nfp_prog)); + emit_alu(nfp_prog, stack_reg(nfp_prog), + stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg); + emit_csr_wr(nfp_prog, stack_reg(nfp_prog), + NFP_CSR_ACT_LM_ADDR0); + } + + /* Two cases for jumping to the callee: + * + * - If callee uses and needs to save R6~R9 then: + * 1. Put the start offset of the callee into imm_b(). This will + * require a fixup step, as we do not necessarily know this + * address yet. + * 2. Put the return address from the callee to the caller into + * register ret_reg(). + * 3. (After defer slots are consumed) Jump to the subroutine that + * pushes the registers to the stack. + * The subroutine acts as a trampoline, and returns to the address in + * imm_b(), i.e. jumps to the callee. + * + * - If callee does not need to save R6~R9 then just load return + * address to the caller in ret_reg(), and jump to the callee + * directly. + * + * Using ret_reg() to pass the return address to the callee is set here + * as a convention. The callee can then push this address onto its + * stack frame in its prologue. The advantages of passing the return + * address through ret_reg(), instead of pushing it to the stack right + * here, are the following: + * - It looks cleaner. + * - If the called function is called multiple time, we get a lower + * program size. + * - We save two no-op instructions that should be added just before + * the emit_br() when stack depth is not null otherwise. + * - If we ever find a register to hold the return address during whole + * execution of the callee, we will not have to push the return + * address to the stack for leaf functions. 
+ */ + if (!meta->jmp_dst) { + pr_err("BUG: BPF-to-BPF call has no destination recorded\n"); + return -ELOOP; + } + if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) { + ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, + RELO_BR_GO_CALL_PUSH_REGS); + offset_br = nfp_prog_current_offset(nfp_prog); + wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL); + } else { + ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; + emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1); + offset_br = nfp_prog_current_offset(nfp_prog); + } + wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL); + + if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) + return -EINVAL; + + if (stack_depth) { + tmp_reg = ur_load_imm_any(nfp_prog, stack_depth, + stack_imm(nfp_prog)); + emit_alu(nfp_prog, stack_reg(nfp_prog), + stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg); + emit_csr_wr(nfp_prog, stack_reg(nfp_prog), + NFP_CSR_ACT_LM_ADDR0); + wrp_nops(nfp_prog, 3); + } + + meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog); + meta->num_insns_after_br -= offset_br; + + return 0; +} + +static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { switch (meta->insn.imm) { case BPF_FUNC_xdp_adjust_head: @@ -3102,6 +3191,19 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) } } +static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + if (is_mbpf_pseudo_call(meta)) + return bpf_to_bpf_call(nfp_prog, meta); + else + return helper_call(nfp_prog, meta); +} + +static bool nfp_is_main_function(struct nfp_insn_meta *meta) +{ + return meta->subprog_idx == 0; +} + static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); @@ -3109,6 +3211,39 @@ static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int +nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) { + /* Pop R6~R9 to the stack via related subroutine. + * We loaded the return address to the caller into ret_reg(). + * This means that the subroutine does not come back here, we + * make it jump back to the subprogram caller directly! + */ + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1, + RELO_BR_GO_CALL_POP_REGS); + /* Pop return address from the stack. */ + wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); + } else { + /* Pop return address from the stack. */ + wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0)); + /* Jump back to caller if no callee-saved registers were used + * by the subprogram. 
+ */ + emit_rtn(nfp_prog, ret_reg(nfp_prog), 0); + } + + return 0; +} + +static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + if (nfp_is_main_function(meta)) + return goto_out(nfp_prog, meta); + else + return nfp_subprog_epilogue(nfp_prog, meta); +} + static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64, [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64, @@ -3197,36 +3332,66 @@ static const instr_cb_t instr_cb[256] = { [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, [BPF_JMP | BPF_CALL] = call, - [BPF_JMP | BPF_EXIT] = goto_out, + [BPF_JMP | BPF_EXIT] = jmp_exit, }; /* --- Assembler logic --- */ +static int +nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + struct nfp_insn_meta *jmp_dst, u32 br_idx) +{ + if (immed_get_value(nfp_prog->prog[br_idx + 1])) { + pr_err("BUG: failed to fix up callee register saving\n"); + return -EINVAL; + } + + immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off); + + return 0; +} + static int nfp_fixup_branches(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta, *jmp_dst; u32 idx, br_idx; + int err; list_for_each_entry(meta, &nfp_prog->insns, l) { if (meta->skip) continue; - if (meta->insn.code == (BPF_JMP | BPF_CALL)) - continue; if (BPF_CLASS(meta->insn.code) != BPF_JMP) continue; + if (meta->insn.code == (BPF_JMP | BPF_EXIT) && + !nfp_is_main_function(meta)) + continue; + if (is_mbpf_helper_call(meta)) + continue; if (list_is_last(&meta->l, &nfp_prog->insns)) br_idx = nfp_prog->last_bpf_off; else br_idx = list_next_entry(meta, l)->off - 1; + /* For BPF-to-BPF function call, a stack adjustment sequence is + * generated after the return instruction. Therefore, we must + * withdraw the length of this sequence to have br_idx pointing + * to where the "branch" NFP instruction is expected to be. + */ + if (is_mbpf_pseudo_call(meta)) + br_idx -= meta->num_insns_after_br; + if (!nfp_is_br(nfp_prog->prog[br_idx])) { pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", br_idx, meta->insn.code, nfp_prog->prog[br_idx]); return -ELOOP; } + + if (meta->insn.code == (BPF_JMP | BPF_EXIT)) + continue; + /* Leave special branches for later */ if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != - RELO_BR_REL) + RELO_BR_REL && !is_mbpf_pseudo_call(meta)) continue; if (!meta->jmp_dst) { @@ -3241,6 +3406,18 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog) return -ELOOP; } + if (is_mbpf_pseudo_call(meta) && + nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) { + err = nfp_fixup_immed_relo(nfp_prog, meta, + jmp_dst, br_idx); + if (err) + return err; + } + + if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != + RELO_BR_REL) + continue; + for (idx = meta->off; idx <= br_idx; idx++) { if (!nfp_is_br(nfp_prog->prog[idx])) continue; @@ -3258,6 +3435,27 @@ static void nfp_intro(struct nfp_prog *nfp_prog) plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); } +static void +nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + /* Save return address into the stack. 
*/ + wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog)); +} + +static void +nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth; + + nfp_prog->stack_frame_depth = round_up(depth, 4); + nfp_subprog_prologue(nfp_prog, meta); +} + +bool nfp_is_subprog_start(struct nfp_insn_meta *meta) +{ + return meta->flags & FLAG_INSN_IS_SUBPROG_START; +} + static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) { /* TC direct-action mode: @@ -3348,6 +3546,67 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog) emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); } +static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog) +{ + unsigned int idx; + + for (idx = 1; idx < nfp_prog->subprog_cnt; idx++) + if (nfp_prog->subprog[idx].needs_reg_push) + return true; + + return false; +} + +static void nfp_push_callee_registers(struct nfp_prog *nfp_prog) +{ + u8 reg; + + /* Subroutine: Save all callee saved registers (R6 ~ R9). + * imm_b() holds the return address. + */ + nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog); + for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { + u8 adj = (reg - BPF_REG_0) * 2; + u8 idx = (reg - BPF_REG_6) * 2; + + /* The first slot in the stack frame is used to push the return + * address in bpf_to_bpf_call(), start just after. + */ + wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj)); + + if (reg == BPF_REG_8) + /* Prepare to jump back, last 3 insns use defer slots */ + emit_rtn(nfp_prog, imm_b(nfp_prog), 3); + + wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1)); + } +} + +static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog) +{ + u8 reg; + + /* Subroutine: Restore all callee saved registers (R6 ~ R9). + * ret_reg() holds the return address. + */ + nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog); + for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) { + u8 adj = (reg - BPF_REG_0) * 2; + u8 idx = (reg - BPF_REG_6) * 2; + + /* The first slot in the stack frame holds the return address, + * start popping just after that. + */ + wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx)); + + if (reg == BPF_REG_8) + /* Prepare to jump back, last 3 insns use defer slots */ + emit_rtn(nfp_prog, ret_reg(nfp_prog), 3); + + wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1)); + } +} + static void nfp_outro(struct nfp_prog *nfp_prog) { switch (nfp_prog->type) { @@ -3360,13 +3619,23 @@ static void nfp_outro(struct nfp_prog *nfp_prog) default: WARN_ON(1); } + + if (!nfp_prog_needs_callee_reg_save(nfp_prog)) + return; + + nfp_push_callee_registers(nfp_prog); + nfp_pop_callee_registers(nfp_prog); } static int nfp_translate(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta; + unsigned int depth; int err; + depth = nfp_prog->subprog[0].stack_depth; + nfp_prog->stack_frame_depth = round_up(depth, 4); + nfp_intro(nfp_prog); if (nfp_prog->error) return nfp_prog->error; @@ -3376,6 +3645,12 @@ static int nfp_translate(struct nfp_prog *nfp_prog) meta->off = nfp_prog_current_offset(nfp_prog); + if (nfp_is_subprog_start(meta)) { + nfp_start_subprog(nfp_prog, meta); + if (nfp_prog->error) + return nfp_prog->error; + } + if (meta->skip) { nfp_prog->n_translated++; continue; @@ -4018,20 +4293,35 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) /* Another pass to record jump information. 
*/ list_for_each_entry(meta, &nfp_prog->insns, l) { + struct nfp_insn_meta *dst_meta; u64 code = meta->insn.code; + unsigned int dst_idx; + bool pseudo_call; + + if (BPF_CLASS(code) != BPF_JMP) + continue; + if (BPF_OP(code) == BPF_EXIT) + continue; + if (is_mbpf_helper_call(meta)) + continue; - if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT && - BPF_OP(code) != BPF_CALL) { - struct nfp_insn_meta *dst_meta; - unsigned short dst_indx; + /* If opcode is BPF_CALL at this point, this can only be a + * BPF-to-BPF call (a.k.a pseudo call). + */ + pseudo_call = BPF_OP(code) == BPF_CALL; - dst_indx = meta->n + 1 + meta->insn.off; - dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx, - cnt); + if (pseudo_call) + dst_idx = meta->n + 1 + meta->insn.imm; + else + dst_idx = meta->n + 1 + meta->insn.off; - meta->jmp_dst = dst_meta; - dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; - } + dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt); + + if (pseudo_call) + dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START; + + dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; + meta->jmp_dst = dst_meta; } } @@ -4054,6 +4344,7 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) for (i = 0; i < nfp_prog->prog_len; i++) { enum nfp_relo_type special; u32 val; + u16 off; special = FIELD_GET(OP_RELO_TYPE, prog[i]); switch (special) { @@ -4070,6 +4361,24 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) br_set_offset(&prog[i], nfp_prog->tgt_abort + bv->start_off); break; + case RELO_BR_GO_CALL_PUSH_REGS: + if (!nfp_prog->tgt_call_push_regs) { + pr_err("BUG: failed to detect subprogram registers needs\n"); + err = -EINVAL; + goto err_free_prog; + } + off = nfp_prog->tgt_call_push_regs + bv->start_off; + br_set_offset(&prog[i], off); + break; + case RELO_BR_GO_CALL_POP_REGS: + if (!nfp_prog->tgt_call_pop_regs) { + pr_err("BUG: failed to detect subprogram registers needs\n"); + err = -EINVAL; + goto err_free_prog; + } + off = nfp_prog->tgt_call_pop_regs + bv->start_off; + br_set_offset(&prog[i], off); + break; case RELO_BR_NEXT_PKT: br_set_offset(&prog[i], bv->tgt_done); break; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 970af07f4656..6243af0ab025 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <net/pkt_cls.h> @@ -54,11 +24,14 @@ const struct rhashtable_params nfp_bpf_maps_neutral_params = { static bool nfp_net_ebpf_capable(struct nfp_net *nn) { #ifdef __LITTLE_ENDIAN - if (nn->cap & NFP_NET_CFG_CTRL_BPF && - nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI) - return true; -#endif + struct nfp_app_bpf *bpf = nn->app->priv; + + return nn->cap & NFP_NET_CFG_CTRL_BPF && + bpf->abi_version && + nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version; +#else return false; +#endif } static int @@ -342,6 +315,26 @@ nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value, return 0; } +static int +nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value, + u32 length) +{ + if (length < 4) { + nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n", + length); + return -EINVAL; + } + + bpf->abi_version = readl(value); + if (bpf->abi_version < 2 || bpf->abi_version > 3) { + nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n", + bpf->abi_version); + bpf->abi_version = 0; + } + + return 0; +} + static int nfp_bpf_parse_capabilities(struct nfp_app *app) { struct nfp_cpp *cpp = app->pf->cpp; @@ -393,6 +386,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) length)) goto err_release_free; break; + case NFP_BPF_CAP_TYPE_ABI_VERSION: + if (nfp_bpf_parse_cap_abi_version(app->priv, value, + length)) + goto err_release_free; + break; default: nfp_dbg(cpp, "unknown BPF capability: %d\n", type); break; @@ -414,6 +412,11 @@ err_release_free: return -EINVAL; } +static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf) +{ + bpf->abi_version = 2; /* Original BPF ABI version */ +} + static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev) { struct nfp_app_bpf *bpf = app->priv; @@ -447,10 +450,21 @@ static int nfp_bpf_init(struct nfp_app *app) if (err) goto err_free_bpf; + nfp_bpf_init_capabilities(bpf); + err = nfp_bpf_parse_capabilities(app); if (err) goto err_free_neutral_maps; + if (bpf->abi_version < 3) { + bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4; + bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4; + } else { + bpf->cmsg_key_sz = bpf->maps.max_key_sz; + bpf->cmsg_val_sz = bpf->maps.max_val_sz; + app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf); + } + bpf->bpf_dev = bpf_offload_dev_create(); err = PTR_ERR_OR_ZERO(bpf->bpf_dev); if (err) @@ -465,11 +479,6 @@ err_free_bpf: return err; } -static void nfp_check_rhashtable_empty(void *ptr, void *arg) -{ - WARN_ON_ONCE(1); -} - static void nfp_bpf_clean(struct nfp_app *app) { struct nfp_app_bpf *bpf = app->priv; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index dbd00982fd2b..7f591d71ab28 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #ifndef __NFP_BPF_H__ #define __NFP_BPF_H__ 1 @@ -61,6 +31,8 @@ enum nfp_relo_type { /* internal jumps to parts of the outro */ RELO_BR_GO_OUT, RELO_BR_GO_ABORT, + RELO_BR_GO_CALL_PUSH_REGS, + RELO_BR_GO_CALL_POP_REGS, /* external jumps to fixed addresses */ RELO_BR_NEXT_PKT, RELO_BR_HELPER, @@ -104,6 +76,7 @@ enum pkt_vec { #define imma_a(np) reg_a(STATIC_REG_IMMA) #define imma_b(np) reg_b(STATIC_REG_IMMA) #define imm_both(np) reg_both(STATIC_REG_IMM) +#define ret_reg(np) imm_a(np) #define NFP_BPF_ABI_FLAGS reg_imm(0) #define NFP_BPF_ABI_FLAG_MARK 1 @@ -121,12 +94,17 @@ enum pkt_vec { * @cmsg_replies: received cmsg replies waiting to be consumed * @cmsg_wq: work queue for waiting for cmsg replies * + * @cmsg_key_sz: size of key in cmsg element array + * @cmsg_val_sz: size of value in cmsg element array + * * @map_list: list of offloaded maps * @maps_in_use: number of currently offloaded maps * @map_elems_in_use: number of elements allocated to offloaded maps * * @maps_neutral: hash table of offload-neutral maps (on pointer) * + * @abi_version: global BPF ABI version + * * @adjust_head: adjust head capability * @adjust_head.flags: extra flags for adjust head * @adjust_head.off_min: minimal packet offset within buffer required @@ -164,12 +142,17 @@ struct nfp_app_bpf { struct sk_buff_head cmsg_replies; struct wait_queue_head cmsg_wq; + unsigned int cmsg_key_sz; + unsigned int cmsg_val_sz; + struct list_head map_list; unsigned int maps_in_use; unsigned int map_elems_in_use; struct rhashtable maps_neutral; + u32 abi_version; + struct nfp_bpf_cap_adjust_head { u32 flags; int off_min; @@ -206,6 +189,11 @@ enum nfp_bpf_map_use { NFP_MAP_USE_ATOMIC_CNT, }; +struct nfp_bpf_map_word { + unsigned char type :4; + unsigned char non_zero_update :1; +}; + /** * struct nfp_bpf_map - private per-map data attached to BPF maps for offload * @offmap: pointer to the offloaded BPF map @@ -219,7 +207,7 @@ struct nfp_bpf_map { struct nfp_app_bpf *bpf; u32 tid; struct list_head l; - enum nfp_bpf_map_use use_map[]; + struct nfp_bpf_map_word 
use_map[]; }; struct nfp_bpf_neutral_map { @@ -252,7 +240,9 @@ struct nfp_bpf_reg_state { bool var_off; }; -#define FLAG_INSN_IS_JUMP_DST BIT(0) +#define FLAG_INSN_IS_JUMP_DST BIT(0) +#define FLAG_INSN_IS_SUBPROG_START BIT(1) +#define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2) /** * struct nfp_insn_meta - BPF instruction wrapper @@ -269,6 +259,7 @@ struct nfp_bpf_reg_state { * @xadd_maybe_16bit: 16bit immediate is possible * @jmp_dst: destination info for jump instructions * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB + * @num_insns_after_br: number of insns following a branch jump, used for fixup * @func_id: function id for call instructions * @arg1: arg1 for call instructions * @arg2: arg2 for call instructions @@ -279,6 +270,7 @@ struct nfp_bpf_reg_state { * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags + * @subprog_idx: index of subprogram to which the instruction belongs * @skip: skip this instruction (optimized out) * @double_cb: callback for second part of the instruction * @l: link on nfp_prog->insns list @@ -304,6 +296,7 @@ struct nfp_insn_meta { struct { struct nfp_insn_meta *jmp_dst; bool jump_neg_op; + u32 num_insns_after_br; /* only for BPF-to-BPF calls */ }; /* function calls */ struct { @@ -325,6 +318,7 @@ struct nfp_insn_meta { unsigned int off; unsigned short n; unsigned short flags; + unsigned short subprog_idx; bool skip; instr_cb_t double_cb; @@ -413,23 +407,56 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV; } +static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta) +{ + struct bpf_insn insn = meta->insn; + + return insn.code == (BPF_JMP | BPF_CALL) && + insn.src_reg != BPF_PSEUDO_CALL; +} + +static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta) +{ + struct bpf_insn insn = meta->insn; + + return insn.code == (BPF_JMP | BPF_CALL) && + insn.src_reg == BPF_PSEUDO_CALL; +} + +#define STACK_FRAME_ALIGN 64 + +/** + * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info + * @stack_depth: maximum stack depth used by this sub-program + * @needs_reg_push: whether sub-program uses callee-saved registers + */ +struct nfp_bpf_subprog_info { + u16 stack_depth; + u8 needs_reg_push : 1; +}; + /** * struct nfp_prog - nfp BPF program * @bpf: backpointer to the bpf app priv structure * @prog: machine code * @prog_len: number of valid instructions in @prog array * @__prog_alloc_len: alloc size of @prog array + * @stack_size: total amount of stack used * @verifier_meta: temporary storage for verifier's insn meta * @type: BPF program type * @last_bpf_off: address of the last instruction translated from BPF * @tgt_out: jump target for normal exit * @tgt_abort: jump target for abort (e.g. access outside of packet buffer) + * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack + * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9 * @n_translated: number of successfully translated instructions (for errors) * @error: error code if something went wrong - * @stack_depth: max stack depth from the verifier + * @stack_frame_depth: max stack depth for current frame * @adjust_head_location: if program has single adjust head call - the insn no. 
* @map_records_cnt: the number of map pointers recorded for this prog + * @subprog_cnt: number of sub-programs, including main function * @map_records: the map record pointers from bpf->maps_neutral + * @subprog: pointer to an array of objects holding info about sub-programs * @insns: list of BPF instruction wrappers (struct nfp_insn_meta) */ struct nfp_prog { @@ -439,6 +466,8 @@ struct nfp_prog { unsigned int prog_len; unsigned int __prog_alloc_len; + unsigned int stack_size; + struct nfp_insn_meta *verifier_meta; enum bpf_prog_type type; @@ -446,15 +475,19 @@ struct nfp_prog { unsigned int last_bpf_off; unsigned int tgt_out; unsigned int tgt_abort; + unsigned int tgt_call_push_regs; + unsigned int tgt_call_pop_regs; unsigned int n_translated; int error; - unsigned int stack_depth; + unsigned int stack_frame_depth; unsigned int adjust_head_location; unsigned int map_records_cnt; + unsigned int subprog_cnt; struct nfp_bpf_neutral_map **map_records; + struct nfp_bpf_subprog_info *subprog; struct list_head insns; }; @@ -471,6 +504,7 @@ struct nfp_bpf_vnic { unsigned int tgt_done; }; +bool nfp_is_subprog_start(struct nfp_insn_meta *meta); void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt); int nfp_bpf_jit(struct nfp_prog *prog); bool nfp_bpf_supported_opcode(u8 code); @@ -492,6 +526,7 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv); +unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf); long long int nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map); void diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 1ccd6371a15b..ba8ceedcf6a2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. 
*/ /* * nfp_net_offload.c @@ -208,6 +178,8 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta, *tmp; + kfree(nfp_prog->subprog); + list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) { list_del(&meta->l); kfree(meta); @@ -250,18 +222,9 @@ err_free: static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) { struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; - unsigned int stack_size; unsigned int max_instr; int err; - stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; - if (prog->aux->stack_depth > stack_size) { - nn_info(nn, "stack too large: program %dB > FW stack %dB\n", - prog->aux->stack_depth, stack_size); - return -EOPNOTSUPP; - } - nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4); - max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); nfp_prog->__prog_alloc_len = max_instr * sizeof(u64); @@ -299,10 +262,25 @@ static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value) unsigned int i; for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++) - if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT) + if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT) word[i] = (__force u32)cpu_to_be32(word[i]); } +/* Mark value as unsafely initialized in case it becomes atomic later + * and we didn't byte swap something non-byte swap neutral. + */ +static void +nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value) +{ + u32 *word = value; + unsigned int i; + + for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++) + if (nfp_map->use_map[i].type == NFP_MAP_UNUSED && + word[i] != (__force u32)cpu_to_be32(word[i])) + nfp_map->use_map[i].non_zero_update = 1; +} + static int nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap, void *key, void *value) @@ -322,6 +300,7 @@ nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap, void *key, void *value, u64 flags) { nfp_map_bpf_byte_swap(offmap->dev_priv, value); + nfp_map_bpf_byte_swap_record(offmap->dev_priv, value); return nfp_bpf_ctrl_update_entry(offmap, key, value, flags); } @@ -510,7 +489,7 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; - unsigned int max_mtu; + unsigned int max_mtu, max_stack, max_prog_len; dma_addr_t dma_addr; void *img; int err; @@ -521,6 +500,18 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog, return -EOPNOTSUPP; } + max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; + if (nfp_prog->stack_size > max_stack) { + NL_SET_ERR_MSG_MOD(extack, "stack too large"); + return -EOPNOTSUPP; + } + + max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); + if (nfp_prog->prog_len > max_prog_len) { + NL_SET_ERR_MSG_MOD(extack, "program too long"); + return -EOPNOTSUPP; + } + img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv); if (IS_ERR(img)) return PTR_ERR(img); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index a6e9248669e1..99f977bfd8cc 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -1,43 +1,15 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. 
You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/kernel.h> +#include <linux/netdevice.h> #include <linux/pkt_cls.h> #include "../nfp_app.h" #include "../nfp_main.h" +#include "../nfp_net.h" #include "fw.h" #include "main.h" @@ -108,6 +80,46 @@ exit_set_location: nfp_prog->adjust_head_location = location; } +static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env) +{ + const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1; + const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3; + struct bpf_offloaded_map *offmap; + struct bpf_func_state *state; + struct nfp_bpf_map *nfp_map; + int off, i; + + state = env->cur_state->frame[reg3->frameno]; + + /* We need to record each time update happens with non-zero words, + * in case such word is used in atomic operations. + * Implicitly depend on nfp_bpf_stack_arg_ok(reg3) being run before. 
+ */ + + offmap = map_to_offmap(reg1->map_ptr); + nfp_map = offmap->dev_priv; + off = reg3->off + reg3->var_off.value; + + for (i = 0; i < offmap->map.value_size; i++) { + struct bpf_stack_state *stack_entry; + unsigned int soff; + + soff = -(off + i) - 1; + stack_entry = &state->stack[soff / BPF_REG_SIZE]; + if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO) + continue; + + if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) { + pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n", + i, soff); + return false; + } + nfp_map->use_map[i / 4].non_zero_update = 1; + } + + return true; +} + static int nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env, const struct bpf_reg_state *reg, @@ -155,8 +167,9 @@ nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env, } static int -nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, - struct nfp_insn_meta *meta) +nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog, + struct bpf_verifier_env *env, + struct nfp_insn_meta *meta) { const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1; const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2; @@ -198,7 +211,8 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, bpf->helpers.map_update, reg1) || !nfp_bpf_stack_arg_ok("map_update", env, reg2, meta->func_id ? &meta->arg2 : NULL) || - !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL)) + !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) || + !nfp_bpf_map_update_value_ok(env)) return -EOPNOTSUPP; break; @@ -333,6 +347,9 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, { s32 old_off, new_off; + if (reg->frameno != env->cur_state->curframe) + meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME; + if (!tnum_is_const(reg->var_off)) { pr_vlog(env, "variable ptr stack access\n"); return -EINVAL; @@ -376,15 +393,22 @@ nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env, struct nfp_bpf_map *nfp_map, unsigned int off, enum nfp_bpf_map_use use) { - if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED && - nfp_map->use_map[off / 4] != use) { + if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED && + nfp_map->use_map[off / 4].type != use) { pr_vlog(env, "map value use type conflict %s vs %s off: %u\n", - nfp_bpf_map_use_name(nfp_map->use_map[off / 4]), + nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type), nfp_bpf_map_use_name(use), off); return -EOPNOTSUPP; } - nfp_map->use_map[off / 4] = use; + if (nfp_map->use_map[off / 4].non_zero_update && + use == NFP_MAP_USE_ATOMIC_CNT) { + pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n", + off); + return -EOPNOTSUPP; + } + + nfp_map->use_map[off / 4].type = use; return 0; } @@ -620,8 +644,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return -EINVAL; } - if (meta->insn.code == (BPF_JMP | BPF_CALL)) - return nfp_bpf_check_call(nfp_prog, env, meta); + if (is_mbpf_helper_call(meta)) + return nfp_bpf_check_helper_call(nfp_prog, env, meta); if (meta->insn.code == (BPF_JMP | BPF_EXIT)) return nfp_bpf_check_exit(nfp_prog, env); @@ -640,6 +664,132 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return 0; } +static int +nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env, + struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta; + int index = 0; + + list_for_each_entry(meta, 
&nfp_prog->insns, l) { + if (nfp_is_subprog_start(meta)) + index++; + meta->subprog_idx = index; + + if (meta->insn.dst_reg >= BPF_REG_6 && + meta->insn.dst_reg <= BPF_REG_9) + nfp_prog->subprog[index].needs_reg_push = 1; + } + + if (index + 1 != nfp_prog->subprog_cnt) { + pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n", + index + 1, nfp_prog->subprog_cnt); + return -EFAULT; + } + + return 0; +} + +static unsigned int +nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt) +{ + struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog); + unsigned int max_depth = 0, depth = 0, frame = 0; + struct nfp_insn_meta *ret_insn[MAX_CALL_FRAMES]; + unsigned short frame_depths[MAX_CALL_FRAMES]; + unsigned short ret_prog[MAX_CALL_FRAMES]; + unsigned short idx = meta->subprog_idx; + + /* Inspired from check_max_stack_depth() from kernel verifier. + * Starting from main subprogram, walk all instructions and recursively + * walk all callees that given subprogram can call. Since recursion is + * prevented by the kernel verifier, this algorithm only needs a local + * stack of MAX_CALL_FRAMES to remember callsites. + */ +process_subprog: + frame_depths[frame] = nfp_prog->subprog[idx].stack_depth; + frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN); + depth += frame_depths[frame]; + max_depth = max(max_depth, depth); + +continue_subprog: + for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx; + meta = nfp_meta_next(meta)) { + if (!is_mbpf_pseudo_call(meta)) + continue; + + /* We found a call to a subprogram. Remember instruction to + * return to and subprog id. + */ + ret_insn[frame] = nfp_meta_next(meta); + ret_prog[frame] = idx; + + /* Find the callee and start processing it. */ + meta = nfp_bpf_goto_meta(nfp_prog, meta, + meta->n + 1 + meta->insn.imm, cnt); + idx = meta->subprog_idx; + frame++; + goto process_subprog; + } + /* End of for() loop means the last instruction of the subprog was + * reached. If we popped all stack frames, return; otherwise, go on + * processing remaining instructions from the caller. + */ + if (frame == 0) + return max_depth; + + depth -= frame_depths[frame]; + frame--; + meta = ret_insn[frame]; + idx = ret_prog[frame]; + goto continue_subprog; +} + +static int nfp_bpf_finalize(struct bpf_verifier_env *env) +{ + struct bpf_subprog_info *info; + struct nfp_prog *nfp_prog; + unsigned int max_stack; + struct nfp_net *nn; + int i; + + nfp_prog = env->prog->aux->offload->dev_priv; + nfp_prog->subprog_cnt = env->subprog_cnt; + nfp_prog->subprog = kcalloc(nfp_prog->subprog_cnt, + sizeof(nfp_prog->subprog[0]), GFP_KERNEL); + if (!nfp_prog->subprog) + return -ENOMEM; + + nfp_assign_subprog_idx_and_regs(env, nfp_prog); + + info = env->subprog_info; + for (i = 0; i < nfp_prog->subprog_cnt; i++) { + nfp_prog->subprog[i].stack_depth = info[i].stack_depth; + + if (i == 0) + continue; + + /* Account for size of return address. */ + nfp_prog->subprog[i].stack_depth += REG_WIDTH; + /* Account for size of saved registers, if necessary. 
*/ + if (nfp_prog->subprog[i].needs_reg_push) + nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4; + } + + nn = netdev_priv(env->prog->aux->offload->netdev); + max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; + nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog, + env->prog->len); + if (nfp_prog->stack_size > max_stack) { + pr_vlog(env, "stack too large: program %dB > FW stack %dB\n", + nfp_prog->stack_size, max_stack); + return -EOPNOTSUPP; + } + + return 0; +} + const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = { - .insn_hook = nfp_verify_insn, + .insn_hook = nfp_verify_insn, + .finalize = nfp_bpf_finalize, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 46ba0cf257c6..244dc261006e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include <linux/bitfield.h> #include <net/geneve.h> @@ -429,12 +399,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, switch (off) { case offsetof(struct iphdr, daddr): - set_ip_addr->ipv4_dst_mask = mask; - set_ip_addr->ipv4_dst = exact; + set_ip_addr->ipv4_dst_mask |= mask; + set_ip_addr->ipv4_dst &= ~mask; + set_ip_addr->ipv4_dst |= exact & mask; break; case offsetof(struct iphdr, saddr): - set_ip_addr->ipv4_src_mask = mask; - set_ip_addr->ipv4_src = exact; + set_ip_addr->ipv4_src_mask |= mask; + set_ip_addr->ipv4_src &= ~mask; + set_ip_addr->ipv4_src |= exact & mask; break; default: return -EOPNOTSUPP; @@ -448,11 +420,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, } static void -nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask, +nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask, struct nfp_fl_set_ipv6_addr *ip6) { - ip6->ipv6[idx % 4].mask = mask; - ip6->ipv6[idx % 4].exact = exact; + ip6->ipv6[word].mask |= mask; + ip6->ipv6[word].exact &= ~mask; + ip6->ipv6[word].exact |= exact & mask; ip6->reserved = cpu_to_be16(0); ip6->head.jump_id = opcode_tag; @@ -465,6 +438,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, struct nfp_fl_set_ipv6_addr *ip_src) { __be32 exact, mask; + u8 word; /* We are expecting tcf_pedit to return a big endian value */ mask = (__force __be32)~tcf_pedit_mask(action, idx); @@ -473,17 +447,20 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, if (exact & ~mask) return -EOPNOTSUPP; - if (off < offsetof(struct ipv6hdr, saddr)) + if (off < offsetof(struct ipv6hdr, saddr)) { return -EOPNOTSUPP; - else if (off < offsetof(struct ipv6hdr, daddr)) - nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx, + } else if (off < offsetof(struct ipv6hdr, daddr)) { + word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact); + nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word, exact, mask, ip_src); - else if (off < offsetof(struct ipv6hdr, daddr) + - sizeof(struct in6_addr)) - nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx, + } else if (off < offsetof(struct ipv6hdr, daddr) + + sizeof(struct in6_addr)) { + word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact); + nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word, exact, mask, ip_dst); - else + } else { return -EOPNOTSUPP; + } return 0; } @@ -541,7 +518,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, struct nfp_fl_set_eth set_eth; enum pedit_header_type htype; int idx, nkeys, err; - size_t act_size; + size_t act_size = 0; u32 offset, cmd; u8 ip_proto = 0; @@ -599,7 +576,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, act_size = sizeof(set_eth); memcpy(nfp_action, &set_eth, act_size); *a_len += act_size; - } else if (set_ip_addr.head.len_lw) { + } + if (set_ip_addr.head.len_lw) { + nfp_action += act_size; act_size = sizeof(set_ip_addr); memcpy(nfp_action, &set_ip_addr, act_size); *a_len += act_size; @@ -607,10 +586,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, /* Hardware will automatically fix IPv4 and TCP/UDP checksum. 
*/ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | nfp_fl_csum_l4_to_flag(ip_proto); - } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { + } + if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions. */ + nfp_action += act_size; act_size = sizeof(set_ip6_src); memcpy(nfp_action, &set_ip6_src, act_size); *a_len += act_size; @@ -623,6 +604,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, /* Hardware will automatically fix TCP/UDP checksum. */ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_dst.head.len_lw) { + nfp_action += act_size; act_size = sizeof(set_ip6_dst); memcpy(nfp_action, &set_ip6_dst, act_size); *a_len += act_size; @@ -630,13 +612,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, /* Hardware will automatically fix TCP/UDP checksum. */ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_src.head.len_lw) { + nfp_action += act_size; act_size = sizeof(set_ip6_src); memcpy(nfp_action, &set_ip6_src, act_size); *a_len += act_size; /* Hardware will automatically fix TCP/UDP checksum. */ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); - } else if (set_tport.head.len_lw) { + } + if (set_tport.head.len_lw) { + nfp_action += act_size; act_size = sizeof(set_tport); memcpy(nfp_action, &set_tport, act_size); *a_len += act_size; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index cb8565222621..4c5eaf36d5bb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ #include <linux/bitfield.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 325954b829c8..29d673aa5277 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef NFP_FLOWER_CMSG_H #define NFP_FLOWER_CMSG_H diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index bf10598f66ae..81dcf5b318ba 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include "main.h" diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index e57d23746585..3a54728d2ea6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/lockdep.h> @@ -518,8 +488,8 @@ err_clear_nn: static int nfp_flower_init(struct nfp_app *app) { const struct nfp_pf *pf = app->pf; + u64 version, features, ctx_count; struct nfp_flower_priv *app_priv; - u64 version, features; int err; if (!pf->eth_tbl) { @@ -543,6 +513,16 @@ static int nfp_flower_init(struct nfp_app *app) return err; } + ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT", + &err); + if (err) { + nfp_warn(app->cpp, + "FlowerNIC: unsupported host context count: %d\n", + err); + err = 0; + ctx_count = BIT(17); + } + /* We need to ensure hardware has enough flower capabilities. 
*/ if (version != NFP_FLOWER_ALLOWED_VER) { nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n"); @@ -553,6 +533,7 @@ static int nfp_flower_init(struct nfp_app *app) if (!app_priv) return -ENOMEM; + app_priv->stats_ring_size = roundup_pow_of_two(ctx_count); app->priv = app_priv; app_priv->app = app; skb_queue_head_init(&app_priv->cmsg_skbs_high); @@ -563,7 +544,7 @@ static int nfp_flower_init(struct nfp_app *app) init_waitqueue_head(&app_priv->mtu_conf.wait_q); spin_lock_init(&app_priv->mtu_conf.lock); - err = nfp_flower_metadata_init(app); + err = nfp_flower_metadata_init(app, ctx_count); if (err) goto err_free_app_priv; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 81d941ab895c..90045bab95bf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #ifndef __NFP_FLOWER_H__ #define __NFP_FLOWER_H__ 1 @@ -38,6 +8,7 @@ #include <linux/circ_buf.h> #include <linux/hashtable.h> +#include <linux/rhashtable.h> #include <linux/time64.h> #include <linux/types.h> #include <net/pkt_cls.h> @@ -50,10 +21,8 @@ struct net_device; struct nfp_app; #define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff) -#define NFP_FL_STATS_ENTRY_RS BIT(20) -#define NFP_FL_STATS_ELEM_RS 4 -#define NFP_FL_REPEATED_HASH_MAX BIT(17) -#define NFP_FLOWER_HASH_BITS 19 +#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ + init_unalloc) #define NFP_FLOWER_MASK_ENTRY_RS 256 #define NFP_FLOWER_MASK_ELEMENT_RS 1 #define NFP_FLOWER_MASK_HASH_BITS 10 @@ -138,7 +107,10 @@ struct nfp_fl_lag { * @stats_ids: List of free stats ids * @mask_ids: List of free mask ids * @mask_table: Hash table used to store masks + * @stats_ring_size: Maximum number of allowed stats ids * @flow_table: Hash table used to store flower rules + * @stats: Stored stats updates for flower rules + * @stats_lock: Lock for flower rule stats updates * @cmsg_work: Workqueue for control messages processing * @cmsg_skbs_high: List of higher priority skbs for control message * processing @@ -171,7 +143,10 @@ struct nfp_flower_priv { struct nfp_fl_stats_id stats_ids; struct nfp_fl_mask_id mask_ids; DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); - DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); + u32 stats_ring_size; + struct rhashtable flow_table; + struct nfp_fl_stats *stats; + spinlock_t stats_lock; /* lock stats */ struct work_struct cmsg_work; struct sk_buff_head cmsg_skbs_high; struct sk_buff_head cmsg_skbs_low; @@ -227,10 +202,8 @@ struct nfp_fl_stats { struct nfp_fl_payload { struct nfp_fl_rule_metadata meta; unsigned long tc_flower_cookie; - struct hlist_node link; + struct rhash_head fl_node; struct rcu_head rcu; - spinlock_t lock; /* lock stats */ - struct nfp_fl_stats stats; __be32 nfp_tun_ipv4_addr; struct net_device *ingress_dev; char *unmasked_data; @@ -239,6 +212,8 @@ struct nfp_fl_payload { bool ingress_offload; }; +extern const struct rhashtable_params nfp_flower_table_params; + struct nfp_fl_stats_frame { __be32 stats_con_id; __be32 pkt_count; @@ -246,7 +221,7 @@ struct nfp_fl_stats_frame { __be64 stats_cookie; }; -int nfp_flower_metadata_init(struct nfp_app *app); +int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 17acb8cc6044..e54fb6034326 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <net/pkt_cls.h> diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index c098730544b7..48729bf171e0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/hash.h> #include <linux/hashtable.h> @@ -48,6 +18,12 @@ struct nfp_mask_id_table { u8 mask_id; }; +struct nfp_fl_flow_table_cmp_arg { + struct net_device *netdev; + unsigned long cookie; + __be32 host_ctx; +}; + static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) { struct nfp_flower_priv *priv = app->priv; @@ -55,14 +31,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) ring = &priv->stats_ids.free_list; /* Check if buffer is full. 
*/ - if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS * - NFP_FL_STATS_ELEM_RS - + if (!CIRC_SPACE(ring->head, ring->tail, + priv->stats_ring_size * NFP_FL_STATS_ELEM_RS - NFP_FL_STATS_ELEM_RS + 1)) return -ENOBUFS; memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS); ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) % - (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS); + (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS); return 0; } @@ -74,7 +50,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) struct circ_buf *ring; ring = &priv->stats_ids.free_list; - freed_stats_id = NFP_FL_STATS_ENTRY_RS; + freed_stats_id = priv->stats_ring_size; /* Check for unallocated entries first. */ if (priv->stats_ids.init_unalloc > 0) { *stats_context_id = priv->stats_ids.init_unalloc - 1; @@ -92,7 +68,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) *stats_context_id = temp_stats_id; memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS); ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) % - (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS); + (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS); return 0; } @@ -102,56 +78,37 @@ struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, struct net_device *netdev, __be32 host_ctx) { + struct nfp_fl_flow_table_cmp_arg flower_cmp_arg; struct nfp_flower_priv *priv = app->priv; - struct nfp_fl_payload *flower_entry; - hash_for_each_possible_rcu(priv->flow_table, flower_entry, link, - tc_flower_cookie) - if (flower_entry->tc_flower_cookie == tc_flower_cookie && - (!netdev || flower_entry->ingress_dev == netdev) && - (host_ctx == NFP_FL_STATS_CTX_DONT_CARE || - flower_entry->meta.host_ctx_id == host_ctx)) - return flower_entry; + flower_cmp_arg.netdev = netdev; + flower_cmp_arg.cookie = tc_flower_cookie; + flower_cmp_arg.host_ctx = host_ctx; - return NULL; -} - -static void -nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats) -{ - struct nfp_fl_payload *nfp_flow; - unsigned long flower_cookie; - - flower_cookie = be64_to_cpu(stats->stats_cookie); - - rcu_read_lock(); - nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL, - stats->stats_con_id); - if (!nfp_flow) - goto exit_rcu_unlock; - - spin_lock(&nfp_flow->lock); - nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count); - nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count); - nfp_flow->stats.used = jiffies; - spin_unlock(&nfp_flow->lock); - -exit_rcu_unlock: - rcu_read_unlock(); + return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg, + nfp_flower_table_params); } void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb) { unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); - struct nfp_fl_stats_frame *stats_frame; + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_stats_frame *stats; unsigned char *msg; + u32 ctx_id; int i; msg = nfp_flower_cmsg_get_data(skb); - stats_frame = (struct nfp_fl_stats_frame *)msg; - for (i = 0; i < msg_len / sizeof(*stats_frame); i++) - nfp_flower_update_stats(app, stats_frame + i); + spin_lock(&priv->stats_lock); + for (i = 0; i < msg_len / sizeof(*stats); i++) { + stats = (struct nfp_fl_stats_frame *)msg + i; + ctx_id = be32_to_cpu(stats->stats_con_id); + priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count); + priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count); + priv->stats[ctx_id].used = jiffies; + } + spin_unlock(&priv->stats_lock); } static int 
nfp_release_mask_id(struct nfp_app *app, u8 mask_id) @@ -345,9 +302,9 @@ int nfp_compile_flow_metadata(struct nfp_app *app, /* Update flow payload with mask ids. */ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; - nfp_flow->stats.pkts = 0; - nfp_flow->stats.bytes = 0; - nfp_flow->stats.used = jiffies; + priv->stats[stats_cxt].pkts = 0; + priv->stats[stats_cxt].bytes = 0; + priv->stats[stats_cxt].used = jiffies; check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev, NFP_FL_STATS_CTX_DONT_CARE); @@ -389,12 +346,56 @@ int nfp_modify_flow_metadata(struct nfp_app *app, return nfp_release_stats_entry(app, temp_ctx_id); } -int nfp_flower_metadata_init(struct nfp_app *app) +static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg, + const void *obj) +{ + const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key; + const struct nfp_fl_payload *flow_entry = obj; + + if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) && + (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE || + flow_entry->meta.host_ctx_id == cmp_arg->host_ctx)) + return flow_entry->tc_flower_cookie != cmp_arg->cookie; + + return 1; +} + +static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct nfp_fl_payload *flower_entry = data; + + return jhash2((u32 *)&flower_entry->tc_flower_cookie, + sizeof(flower_entry->tc_flower_cookie) / sizeof(u32), + seed); +} + +static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed) +{ + const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data; + + return jhash2((u32 *)&cmp_arg->cookie, + sizeof(cmp_arg->cookie) / sizeof(u32), seed); +} + +const struct rhashtable_params nfp_flower_table_params = { + .head_offset = offsetof(struct nfp_fl_payload, fl_node), + .hashfn = nfp_fl_key_hashfn, + .obj_cmpfn = nfp_fl_obj_cmpfn, + .obj_hashfn = nfp_fl_obj_hashfn, + .automatic_shrinking = true, +}; + +int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count) { struct nfp_flower_priv *priv = app->priv; + int err; hash_init(priv->mask_table); - hash_init(priv->flow_table); + + err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params); + if (err) + return err; + get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed)); /* Init ring buffer and unallocated mask_ids. */ @@ -402,7 +403,7 @@ int nfp_flower_metadata_init(struct nfp_app *app) kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); if (!priv->mask_ids.mask_id_free_list.buf) - return -ENOMEM; + goto err_free_flow_table; priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1; @@ -416,18 +417,29 @@ int nfp_flower_metadata_init(struct nfp_app *app) /* Init ring buffer and unallocated stats_ids. 
*/ priv->stats_ids.free_list.buf = vmalloc(array_size(NFP_FL_STATS_ELEM_RS, - NFP_FL_STATS_ENTRY_RS)); + priv->stats_ring_size)); if (!priv->stats_ids.free_list.buf) goto err_free_last_used; - priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX; + priv->stats_ids.init_unalloc = host_ctx_count; + + priv->stats = kvmalloc_array(priv->stats_ring_size, + sizeof(struct nfp_fl_stats), GFP_KERNEL); + if (!priv->stats) + goto err_free_ring_buf; + + spin_lock_init(&priv->stats_lock); return 0; +err_free_ring_buf: + vfree(priv->stats_ids.free_list.buf); err_free_last_used: kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); +err_free_flow_table: + rhashtable_destroy(&priv->flow_table); return -ENOMEM; } @@ -438,6 +450,9 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) if (!priv) return; + rhashtable_free_and_destroy(&priv->flow_table, + nfp_check_rhashtable_empty, NULL); + kvfree(priv->stats); kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.last_used); vfree(priv->stats_ids.free_list.buf); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index bd19624f10cf..29c95423ab64 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include <linux/skbuff.h> #include <net/devlink.h> @@ -428,8 +398,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) flow_pay->nfp_tun_ipv4_addr = 0; flow_pay->meta.flags = 0; - spin_lock_init(&flow_pay->lock); - flow_pay->ingress_offload = !egress; return flow_pay; @@ -513,9 +481,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - INIT_HLIST_NODE(&flow_pay->link); flow_pay->tc_flower_cookie = flow->cookie; - hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie); + err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, + nfp_flower_table_params); + if (err) + goto err_destroy_flow; + port->tc_offload_cnt++; /* Deallocate flow payload when flower rule has been destroyed. */ @@ -550,6 +521,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { struct nfp_port *port = nfp_port_from_netdev(netdev); + struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; struct net_device *ingr_dev; int err; @@ -573,11 +545,13 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_flow; err_free_flow: - hash_del_rcu(&nfp_flow->link); port->tc_offload_cnt--; kfree(nfp_flow->action_data); kfree(nfp_flow->mask_data); kfree(nfp_flow->unmasked_data); + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, + &nfp_flow->fl_node, + nfp_flower_table_params)); kfree_rcu(nfp_flow, rcu); return err; } @@ -598,8 +572,10 @@ static int nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { + struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; struct net_device *ingr_dev; + u32 ctx_id; ingr_dev = egress ? NULL : netdev; nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, @@ -610,13 +586,16 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, if (nfp_flow->ingress_offload && egress) return 0; - spin_lock_bh(&nfp_flow->lock); - tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes, - nfp_flow->stats.pkts, nfp_flow->stats.used); + ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); + + spin_lock_bh(&priv->stats_lock); + tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, + priv->stats[ctx_id].used); - nfp_flow->stats.pkts = 0; - nfp_flow->stats.bytes = 0; - spin_unlock_bh(&nfp_flow->lock); + priv->stats[ctx_id].pkts = 0; + priv->stats[ctx_id].bytes = 0; + spin_unlock_bh(&priv->stats_lock); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 382bb93cb090..8e5bec04d1f9 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -1,39 +1,10 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. 
Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/inetdevice.h> #include <net/netevent.h> +#include <net/vxlan.h> #include <linux/idr.h> #include <net/dst_metadata.h> #include <net/arp.h> @@ -217,7 +188,7 @@ static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev) return false; if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch")) return true; - if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan")) + if (netif_is_vxlan(netdev)) return true; return false; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h index 8b56c27931bf..dd359a44adfb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_abi.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h @@ -1,36 +1,5 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2018 Netronome Systems, Inc. 
*/ #ifndef __NFP_ABI__ #define __NFP_ABI__ 1 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 8607d09ab732..68a0991aac22 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bug.h> #include <linux/lockdep.h> @@ -60,6 +30,11 @@ static const struct nfp_app_type *apps[] = { #endif }; +void nfp_check_rhashtable_empty(void *ptr, void *arg) +{ + WARN_ON_ONCE(1); +} + struct nfp_app *nfp_app_from_netdev(struct net_device *netdev) { if (nfp_netdev_is_nfp_net(netdev)) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 4e1eb3395648..4d6ecf99b1cc 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef _NFP_APP_H #define _NFP_APP_H 1 @@ -40,6 +10,8 @@ #include "nfp_net_repr.h" +#define NFP_APP_CTRL_MTU_MAX U32_MAX + struct bpf_prog; struct net_device; struct netdev_bpf; @@ -178,6 +150,7 @@ struct nfp_app_type { * @ctrl: pointer to ctrl vNIC struct * @reprs: array of pointers to representors * @type: pointer to const application ops and info + * @ctrl_mtu: MTU to set on the control vNIC (set in .init()) * @priv: app-specific priv data */ struct nfp_app { @@ -189,9 +162,11 @@ struct nfp_app { struct nfp_reprs __rcu *reprs[NFP_REPR_TYPE_MAX + 1]; const struct nfp_app_type *type; + unsigned int ctrl_mtu; void *priv; }; +void nfp_check_rhashtable_empty(void *ptr, void *arg); bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c index e2dfe4f168bb..f119277fd66c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nsp.h" diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index cc6ace2be8a9..b04b83687fe2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #include <linux/bitops.h> #include <linux/errno.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index fad0e62a910c..648c2810e5ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #ifndef __NFP_ASM_H__ #define __NFP_ASM_H__ 1 @@ -82,6 +52,15 @@ #define OP_BR_BIT_ADDR_LO OP_BR_ADDR_LO #define OP_BR_BIT_ADDR_HI OP_BR_ADDR_HI +#define OP_BR_ALU_BASE 0x0e800000000ULL +#define OP_BR_ALU_BASE_MASK 0x0ff80000000ULL +#define OP_BR_ALU_A_SRC 0x000000003ffULL +#define OP_BR_ALU_B_SRC 0x000000ffc00ULL +#define OP_BR_ALU_DEFBR 0x00000300000ULL +#define OP_BR_ALU_IMM_HI 0x0007fc00000ULL +#define OP_BR_ALU_SRC_LMEXTN 0x40000000000ULL +#define OP_BR_ALU_DST_LMEXTN 0x80000000000ULL + static inline bool nfp_is_br(u64 insn) { return (insn & OP_BR_BASE_MASK) == OP_BR_BASE || diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index db463e20a876..808647ec3573 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include <linux/rtnetlink.h> #include <net/devlink.h> @@ -96,6 +66,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; + unsigned int lanes; int ret; if (count < 2) @@ -114,8 +85,12 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, goto out; } - ret = nfp_devlink_set_lanes(pf, eth_port.index, - eth_port.port_lanes / count); + /* Special case the 100G CXP -> 2x40G split */ + lanes = eth_port.port_lanes / count; + if (eth_port.lanes == 10 && count == 2) + lanes = 8 / count; + + ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes); out: mutex_unlock(&pf->lock); @@ -128,6 +103,7 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index, { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; + unsigned int lanes; int ret; mutex_lock(&pf->lock); @@ -143,7 +119,12 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index, goto out; } - ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes); + /* Special case the 100G CXP -> 2x40G unsplit */ + lanes = eth_port.port_lanes; + if (eth_port.port_lanes == 8) + lanes = 10; + + ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes); out: mutex_unlock(&pf->lock); @@ -177,7 +158,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return nfp_app_eswitch_mode_get(pf->app, mode); } -static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); int ret; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c index f0dcf45aeec1..5cabb1aa9c0c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. 
*/ #include <linux/kernel.h> #include <linux/bitops.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 4a540c5e27fe..6c10e8d119e4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_main.c @@ -68,6 +38,10 @@ static const struct pci_device_id nfp_pci_device_ids[] = { PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, + }, { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, @@ -112,23 +86,18 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, void *out_data, u64 out_length) { - unsigned long long addr; unsigned long err_at; u64 max_data_sz; u32 val = 0; - u32 cpp_id; int n, err; if (!pf->mbox) return -EOPNOTSUPP; - cpp_id = NFP_CPP_ISLAND_ID(pf->mbox->target, NFP_CPP_ACTION_RW, 0, - pf->mbox->domain); - addr = pf->mbox->addr; - max_data_sz = pf->mbox->size - NFP_MBOX_SYM_MIN_SIZE; + max_data_sz = nfp_rtsym_size(pf->mbox) - NFP_MBOX_SYM_MIN_SIZE; /* Check if cmd field is clear */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val); if (err || val) { nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n", cmd, val, err); @@ -136,30 +105,29 @@ int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, } in_length = min(in_length, max_data_sz); - n = nfp_cpp_write(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, - in_data, in_length); + n = nfp_rtsym_write(pf->cpp, pf->mbox, NFP_MBOX_DATA, in_data, + in_length); if (n != in_length) return -EIO; /* Write data_len and wipe reserved */ - err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, - in_length); + err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, in_length); if (err) return err; /* Read back for ordering */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val); if (err) return err; /* Write cmd and wipe return value */ - err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, cmd); + err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_CMD, cmd); if (err) return err; err_at = jiffies + 5 * HZ; while (true) { /* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val); if (err) return err; if (!val) @@ -172,18 +140,18 @@ int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, } /* Copy output if any (could be error info, do it before reading ret) */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val); if (err) return err; out_length = min_t(u32, val, min(out_length, max_data_sz)); - n = nfp_cpp_read(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, - out_data, out_length); + n = nfp_rtsym_read(pf->cpp, pf->mbox, NFP_MBOX_DATA, + out_data, out_length); if (n != out_length) return -EIO; /* Check if there is an error */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_RET, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_RET, &val); if (err) return err; if (val) @@ -441,8 +409,11 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } fw = nfp_net_fw_find(pdev, pf); - if (!fw) + if (!fw) { + if (nfp_nsp_has_stored_fw_load(nsp)) + nfp_nsp_load_stored_fw(nsp); return 0; + } dev_info(&pdev->dev, "Soft-reset, loading FW image\n"); err = nfp_nsp_device_soft_reset(nsp); @@ -453,7 +424,6 @@ 
nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } err = nfp_nsp_load_fw(nsp, fw); - if (err < 0) { dev_err(&pdev->dev, "FW loading failed: %d\n", err); goto exit_release_fw; @@ -566,9 +536,9 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf) /* Optional per-PCI PF mailbox */ snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol); - if (pf->mbox && pf->mbox->size < NFP_MBOX_SYM_MIN_SIZE) { + if (pf->mbox && nfp_rtsym_size(pf->mbox) < NFP_MBOX_SYM_MIN_SIZE) { nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n", - pf->mbox->size, NFP_MBOX_SYM_MIN_SIZE); + nfp_rtsym_size(pf->mbox), NFP_MBOX_SYM_MIN_SIZE); return -EINVAL; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 595b3dc280e3..a3613a2e0aa5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_main.h diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 439e6ffe2f05..6f0c37d09256 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net.h diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index c6d29fdbb880..6bddfcfdec34 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_net_common.c @@ -2094,10 +2064,10 @@ static void nfp_ctrl_poll(unsigned long arg) { struct nfp_net_r_vector *r_vec = (void *)arg; - spin_lock_bh(&r_vec->lock); + spin_lock(&r_vec->lock); nfp_net_tx_complete(r_vec->tx_ring, 0); __nfp_ctrl_tx_queued(r_vec); - spin_unlock_bh(&r_vec->lock); + spin_unlock(&r_vec->lock); if (nfp_ctrl_rx(r_vec)) { nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); @@ -2187,9 +2157,13 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->txds) + &tx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!tx_ring->txds) { + netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + tx_ring->cnt); goto err_alloc; + } tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs), GFP_KERNEL); @@ -2341,9 +2315,13 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) rx_ring->cnt = dp->rxd_cnt; rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->rxds) + &rx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!rx_ring->rxds) { + netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + rx_ring->cnt); goto err_alloc; + } rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs), GFP_KERNEL); @@ -3159,6 +3137,7 @@ static void nfp_net_stat64(struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); int r; + /* Collect software stats */ for (r = 0; r < nn->max_r_vecs; r++) { struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; u64 data[3]; @@ -3184,6 +3163,14 @@ static void nfp_net_stat64(struct net_device *netdev, stats->tx_bytes += data[1]; stats->tx_errors += data[2]; } + + /* Add in device stats */ + stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES); + stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS); + stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS); + + stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS); + stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS); } static int nfp_net_set_features(struct net_device *netdev, @@ -3751,15 +3738,18 @@ static void nfp_net_netdev_init(struct nfp_net *nn) } if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) netdev->hw_features |= NETIF_F_RXHASH; - if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && - nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) { if (nn->cap & NFP_NET_CFG_CTRL_LSO) - netdev->hw_features |= NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL; - nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; - - netdev->hw_enc_features = netdev->hw_features; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; + } + if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_LSO) + netdev->hw_features |= NETIF_F_GSO_GRE; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE; } + if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE)) + netdev->hw_enc_features = netdev->hw_features; netdev->vlan_features = netdev->hw_features; @@ -3864,10 +3854,20 @@ int nfp_net_init(struct nfp_net *nn) return err; /* Set default MTU and Freelist buffer size */ - if (nn->max_mtu < 
NFP_NET_DEFAULT_MTU) + if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) { + if (nn->app->ctrl_mtu <= nn->max_mtu) { + nn->dp.mtu = nn->app->ctrl_mtu; + } else { + if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX) + nn_warn(nn, "app requested MTU above max supported %u > %u\n", + nn->app->ctrl_mtu, nn->max_mtu); + nn->dp.mtu = nn->max_mtu; + } + } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) { nn->dp.mtu = nn->max_mtu; - else + } else { nn->dp.mtu = NFP_NET_DEFAULT_MTU; + } nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); if (nfp_app_ctrl_uses_data_vnics(nn->app)) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c index 1f9149bb2ae6..f2aaef976c7d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/device.h> @@ -113,6 +83,13 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, caps->mbox_len = length; } break; + case NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0: + case NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1: + dev_warn(dev, + "experimental TLV type:%u offset:%u len:%u\n", + FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr), + offset, length); + break; default: if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr)) break; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 44d3ea75d043..d7c8518ac952 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net_ctrl.h @@ -264,7 +234,6 @@ * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code */ #define NFP_NET_CFG_BPF_ABI 0x0080 -#define NFP_NET_BPF_ABI 2 #define NFP_NET_CFG_BPF_CAP 0x0081 #define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */ #define NFP_NET_CFG_BPF_MAX_LEN 0x0082 @@ -489,12 +458,20 @@ * %NFP_NET_CFG_TLV_TYPE_MBOX: * Variable, mailbox area. Overwrites the default location which is * %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ. + * + * %NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0: + * %NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1: + * Variable, experimental IDs. IDs designated for internal development and + * experiments before a stable TLV ID has been allocated to a feature. Should + * never be present in production firmware. */ #define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0 #define NFP_NET_CFG_TLV_TYPE_RESERVED 1 #define NFP_NET_CFG_TLV_TYPE_END 2 #define NFP_NET_CFG_TLV_TYPE_ME_FREQ 3 #define NFP_NET_CFG_TLV_TYPE_MBOX 4 +#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5 +#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6 struct device; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c index bb8ed460086e..769ceef09756 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/ethtool.h> #include <linux/vmalloc.h> @@ -188,25 +158,21 @@ nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl) const struct nfp_rtsym *specsym; struct nfp_dumpspec *dumpspec; int bytes_read; - u32 cpp_id; + u64 sym_size; specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM); if (!specsym) return NULL; + sym_size = nfp_rtsym_size(specsym); /* expected size of this buffer is in the order of tens of kilobytes */ - dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size); + dumpspec = vmalloc(sizeof(*dumpspec) + sym_size); if (!dumpspec) return NULL; + dumpspec->size = sym_size; - dumpspec->size = specsym->size; - - cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0, - specsym->domain); - - bytes_read = nfp_cpp_read(cpp, cpp_id, specsym->addr, dumpspec->data, - specsym->size); - if (bytes_read != specsym->size) { + bytes_read = nfp_rtsym_read(cpp, specsym, 0, dumpspec->data, sym_size); + if (bytes_read != sym_size) { vfree(dumpspec); nfp_warn(cpp, "Debug dump specification read failed.\n"); return NULL; @@ -266,7 +232,6 @@ nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec) struct nfp_dumpspec_rtsym *spec_rtsym; const struct nfp_rtsym *sym; u32 tl_len, key_len; - u32 size; spec_rtsym = (struct nfp_dumpspec_rtsym *)spec; tl_len = be32_to_cpu(spec->length); @@ -278,13 +243,8 @@ nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec) if (!sym) return nfp_dump_error_tlv_size(spec); - if (sym->type == NFP_RTSYM_TYPE_ABS) - size = sizeof(sym->addr); - else - size = sym->size; - return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) + - ALIGN8(size); + ALIGN8(nfp_rtsym_size(sym)); } static int @@ -644,7 +604,6 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, const struct nfp_rtsym *sym; u32 tl_len, key_len; int bytes_read; - u32 cpp_id; void *dest; int err; @@ -657,11 +616,7 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, if (!sym) return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump); - if (sym->type == NFP_RTSYM_TYPE_ABS) - sym_size = sizeof(sym->addr); - else - sym_size = sym->size; - + sym_size = nfp_rtsym_size(sym); header_size = ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1); total_size = header_size + ALIGN8(sym_size); @@ -676,23 +631,20 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, memcpy(dump_header->rtsym, spec->rtsym, key_len + 1); dump_header->cpp.dump_length = cpu_to_be32(sym_size); - if (sym->type == NFP_RTSYM_TYPE_ABS) { - *(u64 *)dest = sym->addr; - } else { + if (sym->type != NFP_RTSYM_TYPE_ABS) { cpp_params.target = sym->target; cpp_params.action = NFP_CPP_ACTION_RW; cpp_params.token = 0; cpp_params.island = sym->domain; - cpp_id = nfp_get_numeric_cpp_id(&cpp_params); dump_header->cpp.cpp_id = cpp_params; dump_header->cpp.offset = cpu_to_be32(sym->addr); - bytes_read = 
nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest, - sym_size); - if (bytes_read != sym_size) { - if (bytes_read >= 0) - bytes_read = -EIO; - dump_header->error = cpu_to_be32(bytes_read); - } + } + + bytes_read = nfp_rtsym_read(pf->cpp, sym, 0, dest, sym_size); + if (bytes_read != sym_size) { + if (bytes_read >= 0) + bytes_read = -EIO; + dump_header->error = cpu_to_be32(bytes_read); } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 099b63d67451..69b1c9b62e3d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ #include <linux/debugfs.h> #include <linux/module.h> #include <linux/rtnetlink.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6a79c8e4a7a4..cb9c512abc76 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net_ethtool.c diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 28516eecccc8..1e7d20468a34 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net_main.c @@ -470,8 +440,8 @@ static void nfp_net_pci_unmap_mem(struct nfp_pf *pf) static int nfp_net_pci_map_mem(struct nfp_pf *pf) { + u32 min_size, cpp_id; u8 __iomem *mem; - u32 min_size; int err; min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE; @@ -519,9 +489,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) pf->vfcfg_tbl2 = NULL; } - mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0, - NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ, - &pf->qc_area); + cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); + mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0), + NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area); if (IS_ERR(mem)) { nfp_err(pf->cpp, "Failed to map Queue Controller area.\n"); err = PTR_ERR(mem); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 18a09cdcd9c6..c09b893c30dd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/io-64-nonatomic-hi-lo.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 1bf2b18109ab..c412b94bfb97 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #ifndef NFP_NET_REPR_H #define NFP_NET_REPR_H diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 8b1b962cf1d1..b6ec46ed0540 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/errno.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index e9df9d1eab8e..c9f09c5bb5ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017 Netronome Systems, Inc. */ #ifndef _NFP_NET_SRIOV_H_ #define _NFP_NET_SRIOV_H_ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index 68928c86b698..d2c1e9ea5668 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_netvf_main.c diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 9c1298114c70..86bc149ca231 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/lockdep.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 51f10ae2d53e..b2479a2a49e5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef _NFP_PORT_H_ #define _NFP_PORT_H_ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c index 0ecd83705368..814360ed3a20 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c @@ -1,36 +1,5 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. 
Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include <linux/kernel.h> #include <net/devlink.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h index 6cee6382deb4..afab6f0fc564 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ #ifndef NFP_CRC32_H #define NFP_CRC32_H diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index f44d0a857314..db94b0bddc92 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h index 0e497a6154db..4a12133850f5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ #ifndef NFP6000_NFP6000_H #define NFP6000_NFP6000_H diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h index 40fb19939505..9a86ec11c5ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp_xpb.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index c8d0b1016a64..85d46f206b3c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp6000_pcie.c @@ -138,6 +108,7 @@ /* The number of explicit BARs to reserve. * Minimum is 0, maximum is 4 on the NFP6000. + * The NFP3800 can have only one per PF. 
*/ #define NFP_PCIE_EXPLICIT_BARS 2 @@ -589,8 +560,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3), }; char status_msg[196] = {}; + int i, err, bars_free; struct nfp_bar *bar; - int i, bars_free; int expl_groups; char *msg, *end; @@ -643,6 +614,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { + int pf; + msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); atomic_inc(&bar->refcnt); bars_free--; @@ -651,22 +624,40 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000; - if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 || - nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) { - nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); - } else { - int pf = nfp->pdev->devfn & 7; - + switch (nfp->pdev->device) { + case PCI_DEVICE_ID_NETRONOME_NFP3800: + pf = nfp->pdev->devfn & 7; nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + break; + case PCI_DEVICE_ID_NETRONOME_NFP4000: + case PCI_DEVICE_ID_NETRONOME_NFP5000: + case PCI_DEVICE_ID_NETRONOME_NFP6000: + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); + break; + default: + dev_err(nfp->dev, "Unsupported device ID: %04hx!\n", + nfp->pdev->device); + err = -EINVAL; + goto err_unmap_bar0; } nfp->iomem.em = bar->iomem + NFP_PCIE_EM; } - if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 || - nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) - expl_groups = 4; - else + switch (nfp->pdev->device) { + case PCI_DEVICE_ID_NETRONOME_NFP3800: expl_groups = 1; + break; + case PCI_DEVICE_ID_NETRONOME_NFP4000: + case PCI_DEVICE_ID_NETRONOME_NFP5000: + case PCI_DEVICE_ID_NETRONOME_NFP6000: + expl_groups = 4; + break; + default: + dev_err(nfp->dev, "Unsupported device ID: %04hx!\n", + nfp->pdev->device); + err = -EINVAL; + goto err_unmap_bar0; + } /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */ bar = &nfp->bar[1]; @@ -711,6 +702,11 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars); return 0; + +err_unmap_bar0: + if (nfp->bar[0].iomem) + iounmap(nfp->bar[0].iomem); + return err; } static void disable_bars(struct nfp6000_pcie *nfp) @@ -1327,7 +1323,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev) /* Finished with card initialization. */ dev_info(&pdev->dev, - "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n"); + "Netronome Flow Processor NFP4000/NFP5000/NFP6000 PCIe Card Probe\n"); pcie_print_link_status(pdev); nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h index 245d8aaaa97d..6d1bffa6eac6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
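The enable_bars() hunks above key two decisions off the PCI device ID: which BAR0 window holds the PF CSRs (indexed by PF on the NFP3800, always window 0 on NFP4000/5000/6000) and how many explicit BAR groups are available (one versus four). A minimal sketch, not part of the patch, of those per-device parameters gathered into one hypothetical helper; struct nfp_dev_params and nfp_dev_bar_params() are made-up names used only for illustration:

/* Sketch only; assumes the PCI_DEVICE_ID_NETRONOME_* defines from nfp_cpp.h
 * and <linux/pci_ids.h> are visible.
 */
struct nfp_dev_params {
	unsigned int expl_groups;	/* explicit BAR groups to configure */
	bool csr_bar_per_pf;		/* BAR0.0 CSR window is indexed by PF */
};

static int nfp_dev_bar_params(u16 device, struct nfp_dev_params *p)
{
	switch (device) {
	case PCI_DEVICE_ID_NETRONOME_NFP3800:
		p->expl_groups = 1;		/* one explicit group per PF */
		p->csr_bar_per_pf = true;	/* use NFP_PCIE_BAR(devfn & 7) */
		return 0;
	case PCI_DEVICE_ID_NETRONOME_NFP4000:
	case PCI_DEVICE_ID_NETRONOME_NFP5000:
	case PCI_DEVICE_ID_NETRONOME_NFP6000:
		p->expl_groups = 4;
		p->csr_bar_per_pf = false;	/* always NFP_PCIE_BAR(0) */
		return 0;
	default:
		return -EINVAL;			/* unsupported device ID */
	}
}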
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp6000_pcie.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h index 31fe92247f51..3d172e255693 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp_arm.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index c338d539fa96..2dd0f5842873 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_cpp.h @@ -56,9 +26,16 @@ dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) #define nfp_dbg(cpp, fmt, args...) \ dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_printk(level, cpp, fmt, args...) \ + dev_printk(level, nfp_cpp_device(cpp)->parent, \ + NFP_SUBSYS ": " fmt, ## args) #define PCI_64BIT_BAR_COUNT 3 +/* NFP hardware vendor/device ids. + */ +#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800 + #define NFP_CPP_NUM_TARGETS 16 /* Max size of area it should be safe to request */ #define NFP_CPP_SAFE_AREA_SIZE SZ_2M @@ -226,6 +203,7 @@ void nfp_cpp_free(struct nfp_cpp *cpp); u32 nfp_cpp_model(struct nfp_cpp *cpp); u16 nfp_cpp_interface(struct nfp_cpp *cpp); int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial); +unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp); struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 cpp_id, @@ -286,8 +264,8 @@ int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, unsigned long long address, u64 value); u8 __iomem * -nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target, - u64 addr, unsigned long size, struct nfp_cpp_area **area); +nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr, + unsigned long size, struct nfp_cpp_area **area); struct nfp_cpp_mutex; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 73de57a09800..94994a939277 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
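nfp_cpp_map_area() now takes a fully encoded CPP ID instead of separate target and domain arguments, so callers compose the ID themselves with NFP_CPP_ISLAND_ID(). A hedged sketch of an adapted caller; example_map_mu() and the "example-area" name are placeholders, and the MU target is chosen only for illustration:

/* Sketch only; assumes nfp_cpp.h is included. */
static u8 __iomem *
example_map_mu(struct nfp_cpp *cpp, int domain, u64 addr, unsigned long size,
	       struct nfp_cpp_area **area)
{
	/* The (target, action, token, domain) encoding that the helper used
	 * to build internally is now the caller's responsibility.
	 */
	u32 cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW,
				       0, domain);

	/* Returns a mapped pointer or ERR_PTR(), as before. */
	return nfp_cpp_map_area(cpp, "example-area", cpp_id, addr, size, area);
}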
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_cppcore.c @@ -75,6 +45,7 @@ struct nfp_cpp_resource { * @interface: chip interface id we are using to reach it * @serial: chip serial number * @imb_cat_table: CPP Mapping Table + * @mu_locality_lsb: MU access type bit offset * * Following fields use explicit locking: * @resource_list: NFP CPP resource list @@ -100,6 +71,7 @@ struct nfp_cpp { wait_queue_head_t waitq; u32 imb_cat_table[16]; + unsigned int mu_locality_lsb; struct mutex area_cache_mutex; struct list_head area_cache_list; @@ -266,6 +238,34 @@ int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial) return sizeof(cpp->serial); } +#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) + +static int nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp) +{ + unsigned int mode, addr40; + u32 imbcppat; + int res; + + imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU]; + mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); + addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); + + res = nfp_cppat_mu_locality_lsb(mode, addr40); + if (res < 0) + return res; + cpp->mu_locality_lsb = res; + + return 0; +} + +unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp) +{ + return cpp->mu_locality_lsb; +} + /** * nfp_cpp_area_alloc_with_name() - allocate a new CPP area * @cpp: CPP device handle @@ -1241,6 +1241,12 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3, &mask[1]); + err = nfp_cpp_set_mu_locality_lsb(cpp); + if (err < 0) { + dev_err(parent, "Can't calculate MU locality bit offset\n"); + goto err_out; + } + dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n", nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp)); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c index 20bad05e2e92..3cfecf105bde 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
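With the MU locality LSB now computed once at CPP init (nfp_cpp_set_mu_locality_lsb()) and exposed through nfp_cpp_mu_locality_lsb(), callers can adjust the access-type bits of an MU address without re-reading the island XPB registers. A small sketch of that pattern, mirroring the nfp_nffw and nfp_rtsym hunks later in this diff; example_mu_direct_addr() is a made-up name:

/* Sketch only; NFP_MU_ADDR_ACCESS_TYPE_* come from the nfp6000 headers. */
static u64 example_mu_direct_addr(struct nfp_cpp *cpp, u64 addr)
{
	unsigned int lsb = nfp_cpp_mu_locality_lsb(cpp);

	/* Clear the access-type field and force "direct" addressing. */
	addr &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << lsb);
	addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << lsb;

	return addr;
}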
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_cpplib.c @@ -294,8 +264,7 @@ exit_release: * nfp_cpp_map_area() - Helper function to map an area * @cpp: NFP CPP handler * @name: Name for the area - * @domain: CPP domain - * @target: CPP target + * @cpp_id: CPP ID for operation * @addr: CPP address * @size: Size of the area * @area: Area handle (output) @@ -306,15 +275,12 @@ exit_release: * Return: Pointer to memory mapped area or ERR_PTR */ u8 __iomem * -nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target, - u64 addr, unsigned long size, struct nfp_cpp_area **area) +nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr, + unsigned long size, struct nfp_cpp_area **area) { u8 __iomem *res; - u32 dest; - - dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain); - *area = nfp_cpp_area_alloc_acquire(cpp, name, dest, addr, size); + *area = nfp_cpp_area_alloc_acquire(cpp, name, cpp_id, addr, size); if (!*area) goto err_eio; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c index 063a9a6243d6..f05dd34ab89f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM * after chip reset. diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c index 5f193fe2d69e..79e17943519e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp_mip.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index c88bf673cb76..7bc17b94ac60 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ #include <linux/delay.h> #include <linux/device.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index 40510860341b..d4e02542e2e9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_nffw.c @@ -156,29 +126,6 @@ static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi) return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo); } -#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) - -static int nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp) -{ - unsigned int mode, addr40; - u32 xpbaddr, imbcppat; - int err; - - /* Hardcoded XPB IMB Base, island 0 */ - xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4; - err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat); - if (err < 0) - return err; - - mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); - addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); - - return nfp_cppat_mu_locality_lsb(mode, addr40); -} - static unsigned int nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) { @@ -304,14 +251,7 @@ int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off) *off = nffw_fwinfo_mip_offset_get(fwinfo); if (nffw_fwinfo_mip_mu_da_get(fwinfo)) { - int locality_off; - - if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU) - return 0; - - locality_off = nfp_mip_mu_locality_lsb(state->cpp); - if (locality_off < 0) - return locality_off; + int locality_off = nfp_cpp_mu_locality_lsb(state->cpp); *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index df599d5b6bb3..49a4d3f56b56 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_nffw.h @@ -61,10 +31,12 @@ void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size); /* Implemented in nfp_rtsym.c */ -#define NFP_RTSYM_TYPE_NONE 0 -#define NFP_RTSYM_TYPE_OBJECT 1 -#define NFP_RTSYM_TYPE_FUNCTION 2 -#define NFP_RTSYM_TYPE_ABS 3 +enum nfp_rtsym_type { + NFP_RTSYM_TYPE_NONE = 0, + NFP_RTSYM_TYPE_OBJECT = 1, + NFP_RTSYM_TYPE_FUNCTION = 2, + NFP_RTSYM_TYPE_ABS = 3, +}; #define NFP_RTSYM_TARGET_NONE 0 #define NFP_RTSYM_TARGET_LMEM -1 @@ -83,7 +55,7 @@ struct nfp_rtsym { const char *name; u64 addr; u64 size; - int type; + enum nfp_rtsym_type type; int target; int domain; }; @@ -98,6 +70,32 @@ const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx); const struct nfp_rtsym * nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name); +u64 nfp_rtsym_size(const struct nfp_rtsym *rtsym); +int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len); +int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len); +int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *value); +int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 *value); +int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 *value); +int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 *value); +int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len); +int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len); +int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 value); +int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 value); +int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 value); +int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 value); + u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error); int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 2abee0fe3a7c..ce1577bbbd2a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_nsp.c @@ -87,6 +57,11 @@ #define NSP_CODE_MAJOR GENMASK(15, 12) #define NSP_CODE_MINOR GENMASK(11, 0) +#define NFP_FW_LOAD_RET_MAJOR GENMASK(15, 8) +#define NFP_FW_LOAD_RET_MINOR GENMASK(23, 16) + +#define NFP_HWINFO_LOOKUP_SIZE GENMASK(11, 0) + enum nfp_nsp_cmd { SPCODE_NOOP = 0, /* No operation */ SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ @@ -100,6 +75,8 @@ enum nfp_nsp_cmd { SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */ SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */ SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ + SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */ + SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */ }; static const struct { @@ -127,6 +104,40 @@ struct nfp_nsp { void *entries; }; +/** + * struct nfp_nsp_command_arg - NFP command argument structure + * @code: NFP SP Command Code + * @timeout_sec:Timeout value to wait for completion in seconds + * @option: NFP SP Command Argument + * @buff_cpp: NFP SP Buffer CPP Address info + * @buff_addr: NFP SP Buffer Host address + * @error_cb: Callback for interpreting option if error occurred + */ +struct nfp_nsp_command_arg { + u16 code; + unsigned int timeout_sec; + u32 option; + u32 buff_cpp; + u64 buff_addr; + void (*error_cb)(struct nfp_nsp *state, u32 ret_val); +}; + +/** + * struct nfp_nsp_command_buf_arg - NFP command with buffer argument structure + * @arg: NFP command argument structure + * @in_buf: Buffer with data for input + * @in_size: Size of @in_buf + * @out_buf: Buffer for output data + * @out_size: Size of @out_buf + */ +struct nfp_nsp_command_buf_arg { + struct nfp_nsp_command_arg arg; + const void *in_buf; + unsigned int in_size; + void *out_buf; + unsigned int out_size; +}; + struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state) { return state->cpp; @@ -291,11 +302,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, /** * __nfp_nsp_command() - Execute a command on the NFP Service Processor * @state: NFP SP state - * @code: NFP SP Command Code - * @option: NFP SP Command Argument - * @buff_cpp: NFP SP Buffer CPP Address info - * @buff_addr: NFP SP Buffer Host address - * @timeout_sec:Timeout value to wait for completion in seconds + * @arg: NFP command argument structure * * Return: 0 for success with no result * @@ -308,8 +315,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, * -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete */ static int -__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, - u64 buff_addr, u32 timeout_sec) +__nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg) { u64 reg, ret_val, nsp_base, 
nsp_buffer, nsp_status, nsp_command; struct nfp_cpp *cpp = state->cpp; @@ -326,22 +332,22 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, if (err) return err; - if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) || - !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) { + if (!FIELD_FIT(NSP_BUFFER_CPP, arg->buff_cpp >> 8) || + !FIELD_FIT(NSP_BUFFER_ADDRESS, arg->buff_addr)) { nfp_err(cpp, "Host buffer out of reach %08x %016llx\n", - buff_cpp, buff_addr); + arg->buff_cpp, arg->buff_addr); return -EINVAL; } err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, - FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) | - FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr)); + FIELD_PREP(NSP_BUFFER_CPP, arg->buff_cpp >> 8) | + FIELD_PREP(NSP_BUFFER_ADDRESS, arg->buff_addr)); if (err < 0) return err; err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, - FIELD_PREP(NSP_COMMAND_OPTION, option) | - FIELD_PREP(NSP_COMMAND_CODE, code) | + FIELD_PREP(NSP_COMMAND_OPTION, arg->option) | + FIELD_PREP(NSP_COMMAND_CODE, arg->code) | FIELD_PREP(NSP_COMMAND_START, 1)); if (err < 0) return err; @@ -351,16 +357,16 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT); if (err) { nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n", - err, code); + err, arg->code); return err; } /* Wait for NSP_STATUS_BUSY to go to 0 */ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY, - 0, timeout_sec); + 0, arg->timeout_sec ?: NFP_NSP_TIMEOUT_DEFAULT); if (err) { nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n", - err, code); + err, arg->code); return err; } @@ -372,26 +378,28 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, err = FIELD_GET(NSP_STATUS_RESULT, reg); if (err) { nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n", - -err, (int)ret_val, code); - nfp_nsp_print_extended_error(state, ret_val); + -err, (int)ret_val, arg->code); + if (arg->error_cb) + arg->error_cb(state, ret_val); + else + nfp_nsp_print_extended_error(state, ret_val); return -err; } return ret_val; } -static int -nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, - u64 buff_addr) +static int nfp_nsp_command(struct nfp_nsp *state, u16 code) { - return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr, - NFP_NSP_TIMEOUT_DEFAULT); + const struct nfp_nsp_command_arg arg = { + .code = code, + }; + + return __nfp_nsp_command(state, &arg); } static int -__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, - const void *in_buf, unsigned int in_size, void *out_buf, - unsigned int out_size, u32 timeout_sec) +nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg) { struct nfp_cpp *cpp = nsp->cpp; unsigned int max_size; @@ -401,7 +409,7 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, if (nsp->ver.minor < 13) { nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n", - code, nsp->ver.major, nsp->ver.minor); + arg->arg.code, nsp->ver.major, nsp->ver.minor); return -EOPNOTSUPP; } @@ -412,10 +420,11 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, if (err < 0) return err; - max_size = max(in_size, out_size); + max_size = max(arg->in_size, arg->out_size); if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) { nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n", - code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, + arg->arg.code, + FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB,
reg) * SZ_1M, max_size); return -EINVAL; } @@ -430,27 +439,30 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8; cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg); - if (in_buf && in_size) { - err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); + if (arg->in_buf && arg->in_size) { + err = nfp_cpp_write(cpp, cpp_id, cpp_buf, + arg->in_buf, arg->in_size); if (err < 0) return err; } /* Zero out remaining part of the buffer */ - if (out_buf && out_size && out_size > in_size) { - memset(out_buf, 0, out_size - in_size); - err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, - out_buf, out_size - in_size); + if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size) { + memset(arg->out_buf, 0, arg->out_size - arg->in_size); + err = nfp_cpp_write(cpp, cpp_id, cpp_buf + arg->in_size, + arg->out_buf, arg->out_size - arg->in_size); if (err < 0) return err; } - ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf, - timeout_sec); + arg->arg.buff_cpp = cpp_id; + arg->arg.buff_addr = cpp_buf; + ret = __nfp_nsp_command(nsp, &arg->arg); if (ret < 0) return ret; - if (out_buf && out_size) { - err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size); + if (arg->out_buf && arg->out_size) { + err = nfp_cpp_read(cpp, cpp_id, cpp_buf, + arg->out_buf, arg->out_size); if (err < 0) return err; } @@ -458,16 +470,6 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, return ret; } -static int -nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, - const void *in_buf, unsigned int in_size, void *out_buf, - unsigned int out_size) -{ - return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size, - out_buf, out_size, - NFP_NSP_TIMEOUT_DEFAULT); -} - int nfp_nsp_wait(struct nfp_nsp *state) { const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ; @@ -479,7 +481,7 @@ int nfp_nsp_wait(struct nfp_nsp *state) for (;;) { const unsigned long start_time = jiffies; - err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0); + err = nfp_nsp_command(state, SPCODE_NOOP); if (err != -EAGAIN) break; @@ -501,53 +503,211 @@ int nfp_nsp_wait(struct nfp_nsp *state) int nfp_nsp_device_soft_reset(struct nfp_nsp *state) { - return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0); + return nfp_nsp_command(state, SPCODE_SOFT_RESET); } int nfp_nsp_mac_reinit(struct nfp_nsp *state) { - return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0); + return nfp_nsp_command(state, SPCODE_MAC_INIT); +} + +static void nfp_nsp_load_fw_extended_msg(struct nfp_nsp *state, u32 ret_val) +{ + static const char * const major_msg[] = { + /* 0 */ "Firmware from driver loaded", + /* 1 */ "Firmware from flash loaded", + /* 2 */ "Firmware loading failure", + }; + static const char * const minor_msg[] = { + /* 0 */ "", + /* 1 */ "no named partition on flash", + /* 2 */ "error reading from flash", + /* 3 */ "can not deflate", + /* 4 */ "not a trusted file", + /* 5 */ "can not parse FW file", + /* 6 */ "MIP not found in FW file", + /* 7 */ "null firmware name in MIP", + /* 8 */ "FW version none", + /* 9 */ "FW build number none", + /* 10 */ "no FW selection policy HWInfo key found", + /* 11 */ "static FW selection policy", + /* 12 */ "FW version has precedence", + /* 13 */ "different FW application load requested", + /* 14 */ "development build", + }; + unsigned int major, minor; + const char *level; + + major = FIELD_GET(NFP_FW_LOAD_RET_MAJOR, ret_val); + minor = FIELD_GET(NFP_FW_LOAD_RET_MINOR, ret_val); + + if 
(!nfp_nsp_has_stored_fw_load(state)) + return; + + /* Lower the message level in legacy case */ + if (major == 0 && (minor == 0 || minor == 10)) + level = KERN_DEBUG; + else if (major == 2) + level = KERN_ERR; + else + level = KERN_INFO; + + if (major >= ARRAY_SIZE(major_msg)) + nfp_printk(level, state->cpp, "FW loading status: %x\n", + ret_val); + else if (minor >= ARRAY_SIZE(minor_msg)) + nfp_printk(level, state->cpp, "%s, reason code: %d\n", + major_msg[major], minor); + else + nfp_printk(level, state->cpp, "%s%c %s\n", + major_msg[major], minor ? ',' : '.', + minor_msg[minor]); } int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw) { - return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data, - fw->size, NULL, 0); + struct nfp_nsp_command_buf_arg load_fw = { + { + .code = SPCODE_FW_LOAD, + .option = fw->size, + .error_cb = nfp_nsp_load_fw_extended_msg, + }, + .in_buf = fw->data, + .in_size = fw->size, + }; + int ret; + + ret = nfp_nsp_command_buf(state, &load_fw); + if (ret < 0) + return ret; + + nfp_nsp_load_fw_extended_msg(state, ret); + return 0; } int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw) { - /* The flash time is specified to take a maximum of 70s so we add an - * additional factor to this spec time. - */ - u32 timeout_sec = 2.5 * 70; - - return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size, - fw->data, fw->size, NULL, 0, timeout_sec); + struct nfp_nsp_command_buf_arg write_flash = { + { + .code = SPCODE_NSP_WRITE_FLASH, + .option = fw->size, + /* The flash time is specified to take a maximum of 70s + * so we add an additional factor to this spec time. + */ + .timeout_sec = 2.5 * 70, + }, + .in_buf = fw->data, + .in_size = fw->size, + }; + + return nfp_nsp_command_buf(state, &write_flash); } int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0, - buf, size); + struct nfp_nsp_command_buf_arg eth_rescan = { + { + .code = SPCODE_ETH_RESCAN, + .option = size, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &eth_rescan); } int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, - NULL, 0); + struct nfp_nsp_command_buf_arg eth_ctrl = { + { + .code = SPCODE_ETH_CONTROL, + .option = size, + }, + .in_buf = buf, + .in_size = size, + }; + + return nfp_nsp_command_buf(state, &eth_ctrl); } int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0, - buf, size); + struct nfp_nsp_command_buf_arg identify = { + { + .code = SPCODE_NSP_IDENTIFY, + .option = size, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &identify); } int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, - NULL, 0, buf, size); + struct nfp_nsp_command_buf_arg sensors = { + { + .code = SPCODE_NSP_SENSORS, + .option = sensor_mask, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &sensors); +} + +int nfp_nsp_load_stored_fw(struct nfp_nsp *state) +{ + const struct nfp_nsp_command_arg arg = { + .code = SPCODE_FW_STORED, + .error_cb = nfp_nsp_load_fw_extended_msg, + }; + int ret; + + ret = __nfp_nsp_command(state, &arg); + if (ret < 0) +
return ret; + + nfp_nsp_load_fw_extended_msg(state, ret); + return 0; +} + +static int +__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) +{ + struct nfp_nsp_command_buf_arg hwinfo_lookup = { + { + .code = SPCODE_HWINFO_LOOKUP, + .option = size, + }, + .in_buf = buf, + .in_size = size, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &hwinfo_lookup); +} + +int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) +{ + int err; + + size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE); + + err = __nfp_nsp_hwinfo_lookup(state, buf, size); + if (err) + return err; + + if (strnlen(buf, size) == size) { + nfp_err(state->cpp, "NSP HWinfo value not NULL-terminated\n"); + return -EINVAL; + } + + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index f23d9e06f097..ff33ac54097a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ #ifndef NSP_NSP_H #define NSP_NSP_H 1 @@ -50,12 +20,24 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); +int nfp_nsp_load_stored_fw(struct nfp_nsp *state); +int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size); static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state) { return nfp_nsp_get_abi_ver_minor(state) > 20; } +static inline bool nfp_nsp_has_stored_fw_load(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 23; +} + +static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 24; +} + enum nfp_eth_interface { NFP_INTERFACE_NONE = 0, NFP_INTERFACE_SFP = 1, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c index 5d362f87af08..0997d127144f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include <linux/kernel.h> #include <linux/slab.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index 7ca589660e4d..802c9224bb32 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
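The new NSP entry points are gated on the service processor ABI minor version, as the nfp_nsp_has_*() helpers above show (stored-FW load needs minor 24 or newer, HWinfo lookup needs minor 25 or newer). A sketch of how a caller might use them; the surrounding probe logic and the "sample.key" HWinfo key are assumptions for illustration only:

/* Sketch only; not taken from this patch. */
static int example_use_new_nsp_cmds(struct nfp_nsp *nsp)
{
	char buf[32] = "sample.key";	/* hypothetical HWinfo key */
	int err;

	/* Ask the NSP to load the application FW stored in flash, if none
	 * is loaded yet (SPCODE_FW_STORED, ABI minor >= 24).
	 */
	if (nfp_nsp_has_stored_fw_load(nsp)) {
		err = nfp_nsp_load_stored_fw(nsp);
		if (err)
			return err;
	}

	/* Resolve a HWinfo key through the NSP (SPCODE_HWINFO_LOOKUP,
	 * ABI minor >= 25); the buffer carries the key in and the
	 * NUL-terminated value out.
	 */
	if (nfp_nsp_has_hwinfo_lookup(nsp)) {
		err = nfp_nsp_hwinfo_lookup(nsp, buf, sizeof(buf));
		if (err)
			return err;
	}

	return 0;
}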
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* Authors: David Brunecz <david.brunecz@netronome.com> * Jakub Kicinski <jakub.kicinski@netronome.com> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index d32af598da90..ce7492a6a98f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_resource.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 9e34216578da..75f012444796 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
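The rtsym accessors declared in nfp_nffw.h above, and implemented in the nfp_rtsym.c hunks that follow, replace open-coded CPP reads with bounds-checked helpers keyed off the symbol table. A short usage sketch; the symbol name and example_read_rtsym_counter() are placeholders:

/* Sketch only; assumes nfp_cpp.h and nfp_nffw.h are included. */
static int example_read_rtsym_counter(struct nfp_cpp *cpp,
				      struct nfp_rtsym_table *rtbl, u64 *value)
{
	const struct nfp_rtsym *sym;

	sym = nfp_rtsym_lookup(rtbl, "_example_counter");
	if (!sym)
		return -ENOENT;

	/* Offset 0; the helper itself checks the offset against
	 * nfp_rtsym_size() and resolves the CPP target, including
	 * EMU cache symbols.
	 */
	return nfp_rtsym_readq(cpp, sym, 0, value);
}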
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_rtsym.c @@ -39,6 +9,8 @@ * Espen Skoglund <espen.skoglund@netronome.com> * Francois H. Theron <francois.theron@netronome.com> */ + +#include <asm/unaligned.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -233,6 +205,229 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) return NULL; } +u64 nfp_rtsym_size(const struct nfp_rtsym *sym) +{ + switch (sym->type) { + case NFP_RTSYM_TYPE_NONE: + pr_err("rtsym '%s': type NONE\n", sym->name); + return 0; + default: + pr_warn("rtsym '%s': unknown type: %d\n", sym->name, sym->type); + /* fall through */ + case NFP_RTSYM_TYPE_OBJECT: + case NFP_RTSYM_TYPE_FUNCTION: + return sym->size; + case NFP_RTSYM_TYPE_ABS: + return sizeof(u64); + } +} + +static int +nfp_rtsym_to_dest(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *cpp_id, u64 *addr) +{ + if (sym->type != NFP_RTSYM_TYPE_OBJECT) { + nfp_err(cpp, "rtsym '%s': direct access to non-object rtsym\n", + sym->name); + return -EINVAL; + } + + *addr = sym->addr + off; + + if (sym->target == NFP_RTSYM_TARGET_EMU_CACHE) { + int locality_off = nfp_cpp_mu_locality_lsb(cpp); + + *addr &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); + *addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; + + *cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token, + sym->domain); + } else if (sym->target < 0) { + nfp_err(cpp, "rtsym '%s': unhandled target encoding: %d\n", + sym->name, sym->target); + return -EINVAL; + } else { + *cpp_id = NFP_CPP_ISLAND_ID(sym->target, action, token, + sym->domain); + } + + return 0; +} + +int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len) +{ + u64 sym_size = nfp_rtsym_size(sym); + u32 cpp_id; + u64 addr; + int err; + + if (off > sym_size) { + nfp_err(cpp, "rtsym '%s': read out of bounds: off: %lld + len: %zd > size: %lld\n", + sym->name, off, len, sym_size); + return -ENXIO; + } + len = min_t(size_t, len, sym_size - off); + + if (sym->type == 
NFP_RTSYM_TYPE_ABS) { + u8 tmp[8]; + + put_unaligned_le64(sym->addr, tmp); + memcpy(buf, &tmp[off], len); + + return len; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_read(cpp, cpp_id, addr, buf, len); +} + +int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len) +{ + return __nfp_rtsym_read(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len); +} + +int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 4 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': readl out of bounds: off: %lld + 4 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_readl(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 *value) +{ + return __nfp_rtsym_readl(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 *value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 8 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': readq out of bounds: off: %lld + 8 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + if (sym->type == NFP_RTSYM_TYPE_ABS) { + *value = sym->addr; + return 0; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_readq(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 *value) +{ + return __nfp_rtsym_readq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len) +{ + u64 sym_size = nfp_rtsym_size(sym); + u32 cpp_id; + u64 addr; + int err; + + if (off > sym_size) { + nfp_err(cpp, "rtsym '%s': write out of bounds: off: %lld + len: %zd > size: %lld\n", + sym->name, off, len, sym_size); + return -ENXIO; + } + len = min_t(size_t, len, sym_size - off); + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_write(cpp, cpp_id, addr, buf, len); +} + +int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len) +{ + return __nfp_rtsym_write(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len); +} + +int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 4 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': writel out of bounds: off: %lld + 4 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_writel(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 value) +{ + return __nfp_rtsym_writel(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 8 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym 
'%s': writeq out of bounds: off: %lld + 8 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_writeq(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 value) +{ + return __nfp_rtsym_writeq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + /** * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol * @rtbl: NFP RTsym table @@ -249,7 +444,7 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) { const struct nfp_rtsym *sym; - u32 val32, id; + u32 val32; u64 val; int err; @@ -259,20 +454,18 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, goto exit; } - id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); - - switch (sym->size) { + switch (nfp_rtsym_size(sym)) { case 4: - err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32); + err = nfp_rtsym_readl(rtbl->cpp, sym, 0, &val32); val = val32; break; case 8: - err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val); + err = nfp_rtsym_readq(rtbl->cpp, sym, 0, &val); break; default: nfp_err(rtbl->cpp, - "rtsym '%s' unsupported or non-scalar size: %lld\n", - name, sym->size); + "rtsym '%s': unsupported or non-scalar size: %lld\n", + name, nfp_rtsym_size(sym)); err = -EINVAL; break; } @@ -303,25 +496,22 @@ int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, { const struct nfp_rtsym *sym; int err; - u32 id; sym = nfp_rtsym_lookup(rtbl, name); if (!sym) return -ENOENT; - id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); - - switch (sym->size) { + switch (nfp_rtsym_size(sym)) { case 4: - err = nfp_cpp_writel(rtbl->cpp, id, sym->addr, value); + err = nfp_rtsym_writel(rtbl->cpp, sym, 0, value); break; case 8: - err = nfp_cpp_writeq(rtbl->cpp, id, sym->addr, value); + err = nfp_rtsym_writeq(rtbl->cpp, sym, 0, value); break; default: nfp_err(rtbl->cpp, - "rtsym '%s' unsupported or non-scalar size: %lld\n", - name, sym->size); + "rtsym '%s': unsupported or non-scalar size: %lld\n", + name, nfp_rtsym_size(sym)); err = -EINVAL; break; } @@ -335,20 +525,29 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, { const struct nfp_rtsym *sym; u8 __iomem *mem; + u32 cpp_id; + u64 addr; + int err; sym = nfp_rtsym_lookup(rtbl, name); if (!sym) return (u8 __iomem *)ERR_PTR(-ENOENT); + err = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, + &cpp_id, &addr); + if (err) { + nfp_err(rtbl->cpp, "rtsym '%s': mapping failed\n", name); + return (u8 __iomem *)ERR_PTR(err); + } + if (sym->size < min_size) { - nfp_err(rtbl->cpp, "Symbol %s too small\n", name); + nfp_err(rtbl->cpp, "rtsym '%s': too small\n", name); return (u8 __iomem *)ERR_PTR(-EINVAL); } - mem = nfp_cpp_map_area(rtbl->cpp, id, sym->domain, sym->target, - sym->addr, sym->size, area); + mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr, sym->size, area); if (IS_ERR(mem)) { - nfp_err(rtbl->cpp, "Failed to map symbol %s: %ld\n", + nfp_err(rtbl->cpp, "rtysm '%s': failed to map: %ld\n", name, PTR_ERR(mem)); return mem; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c index 4ea1e585d945..79470f198a62 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 
2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_target.c @@ -39,7 +9,11 @@ * Francois H. Theron <francois.theron@netronome.com> */ +#define pr_fmt(fmt) "NFP target: " fmt + #include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/printk.h> #include "nfp_cpp.h" @@ -733,8 +707,10 @@ int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, u32 imb; int err; - if (target < 0 || target >= 16) + if (target < 0 || target >= 16) { + pr_err("Invalid CPP target: %d\n", target); return -EINVAL; + } if (island == 0) { /* Already translated */ @@ -753,8 +729,10 @@ int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, err = nfp_cppat_addr_encode(cpp_target_address, island, target, ((imb >> 13) & 7), ((imb >> 12) & 1), ((imb >> 6) & 0x3f), ((imb >> 0) & 0x3f)); - if (err) + if (err) { + pr_err("Can't encode CPP address: %d\n", err); return err; + } *cpp_target_id = NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id), diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c index d5b587fccaa3..aea8579206ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.c +++ b/drivers/net/ethernet/netronome/nfp/nic/main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include "../nfpcore/nfp_cpp.h" #include "../nfpcore/nfp_nsp.h" diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig index aa41e5f6e437..c73978474c4b 100644 --- a/drivers/net/ethernet/ni/Kconfig +++ b/drivers/net/ethernet/ni/Kconfig @@ -18,8 +18,9 @@ if NET_VENDOR_NI config NI_XGE_MANAGEMENT_ENET tristate "National Instruments XGE management enet support" - depends on ARCH_ZYNQ + depends on HAS_IOMEM && HAS_DMA select PHYLIB + select OF_MDIO if OF help Simple LAN device for debug or management purposes. Can support either 10G or 1G PHYs via SFP+ ports. diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 76efed058f33..0611f2335b4a 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -106,10 +106,10 @@ (NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) struct nixge_hw_dma_bd { - u32 next; - u32 reserved1; - u32 phys; - u32 reserved2; + u32 next_lo; + u32 next_hi; + u32 phys_lo; + u32 phys_hi; u32 reserved3; u32 reserved4; u32 cntrl; @@ -119,11 +119,39 @@ struct nixge_hw_dma_bd { u32 app2; u32 app3; u32 app4; - u32 sw_id_offset; - u32 reserved5; + u32 sw_id_offset_lo; + u32 sw_id_offset_hi; u32 reserved6; }; +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define nixge_hw_dma_bd_set_addr(bd, field, addr) \ + do { \ + (bd)->field##_lo = lower_32_bits((addr)); \ + (bd)->field##_hi = upper_32_bits((addr)); \ + } while (0) +#else +#define nixge_hw_dma_bd_set_addr(bd, field, addr) \ + ((bd)->field##_lo = lower_32_bits((addr))) +#endif + +#define nixge_hw_dma_bd_set_phys(bd, addr) \ + nixge_hw_dma_bd_set_addr((bd), phys, (addr)) + +#define nixge_hw_dma_bd_set_next(bd, addr) \ + nixge_hw_dma_bd_set_addr((bd), next, (addr)) + +#define nixge_hw_dma_bd_set_offset(bd, addr) \ + nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr)) + +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define nixge_hw_dma_bd_get_addr(bd, field) \ + (dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo)) +#else +#define nixge_hw_dma_bd_get_addr(bd, field) \ + (dma_addr_t)((bd)->field##_lo) +#endif + struct nixge_tx_skb { struct sk_buff *skb; dma_addr_t mapping; @@ -176,6 +204,15 @@ static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val) writel(val, priv->dma_regs + offset); } +static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset, + dma_addr_t addr) +{ + writel(lower_32_bits(addr), priv->dma_regs + offset); +#ifdef CONFIG_PHYS_ADDR_T_64BIT + writel(upper_32_bits(addr), priv->dma_regs + offset + 4); +#endif +} + static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset) { return readl(priv->dma_regs + offset); @@ -202,13 +239,22 @@ static u32 nixge_ctrl_read_reg(struct nixge_priv 
*priv, off_t offset) static void nixge_hw_dma_bd_release(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); + dma_addr_t phys_addr; + struct sk_buff *skb; int i; for (i = 0; i < RX_BD_NUM; i++) { - dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys, - NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); - dev_kfree_skb((struct sk_buff *) - (priv->rx_bd_v[i].sw_id_offset)); + phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + phys); + + dma_unmap_single(ndev->dev.parent, phys_addr, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + + skb = (struct sk_buff *)(uintptr_t) + nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + sw_id_offset); + dev_kfree_skb(skb); } if (priv->rx_bd_v) @@ -231,6 +277,7 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); struct sk_buff *skb; + dma_addr_t phys; u32 cr; int i; @@ -259,27 +306,30 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) goto out; for (i = 0; i < TX_BD_NUM; i++) { - priv->tx_bd_v[i].next = priv->tx_bd_p + - sizeof(*priv->tx_bd_v) * - ((i + 1) % TX_BD_NUM); + nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i], + priv->tx_bd_p + + sizeof(*priv->tx_bd_v) * + ((i + 1) % TX_BD_NUM)); } for (i = 0; i < RX_BD_NUM; i++) { - priv->rx_bd_v[i].next = priv->rx_bd_p + - sizeof(*priv->rx_bd_v) * - ((i + 1) % RX_BD_NUM); + nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i], + priv->rx_bd_p + + sizeof(*priv->rx_bd_v) * + ((i + 1) % RX_BD_NUM)); skb = netdev_alloc_skb_ip_align(ndev, NIXGE_MAX_JUMBO_FRAME_SIZE); if (!skb) goto out; - priv->rx_bd_v[i].sw_id_offset = (u32)skb; - priv->rx_bd_v[i].phys = - dma_map_single(ndev->dev.parent, - skb->data, - NIXGE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); + nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb); + phys = dma_map_single(ndev->dev.parent, skb->data, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + + nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys); + priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; } @@ -312,18 +362,18 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. */ - nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p); cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); - nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p + + nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p + (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. But only after we write to the * tail pointer register that the Tx channel will start transmitting. 
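The nixge descriptor rework above replaces the single 32-bit next/phys/sw_id_offset words with lo/hi pairs so a buffer descriptor can carry a DMA address above 4 GB; the set/get macros then write both halves or only the low word depending on CONFIG_PHYS_ADDR_T_64BIT. A minimal stand-alone C sketch of the same split-and-recombine pattern (hypothetical demo_bd struct and helper names, not the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical descriptor with the address split into two 32-bit words,
 * mirroring the phys_lo/phys_hi layout introduced in the diff. */
struct demo_bd {
	uint32_t phys_lo;
	uint32_t phys_hi;
};

static void demo_bd_set_phys(struct demo_bd *bd, uint64_t addr)
{
	bd->phys_lo = (uint32_t)(addr & 0xffffffffu);	/* like lower_32_bits() */
	bd->phys_hi = (uint32_t)(addr >> 32);		/* like upper_32_bits() */
}

static uint64_t demo_bd_get_phys(const struct demo_bd *bd)
{
	return ((uint64_t)bd->phys_hi << 32) | bd->phys_lo;
}

int main(void)
{
	struct demo_bd bd;
	uint64_t addr = 0x1234567890abcdefULL;

	demo_bd_set_phys(&bd, addr);
	printf("lo=0x%08x hi=0x%08x round-trip=0x%016llx\n",
	       (unsigned)bd.phys_lo, (unsigned)bd.phys_hi,
	       (unsigned long long)demo_bd_get_phys(&bd));
	return 0;
}

The same idea drives nixge_dma_write_desc_reg() in the hunk above, which writes the upper 32 bits to the adjacent register only when 64-bit physical addresses are configured.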
*/ - nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); @@ -451,7 +501,7 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct nixge_priv *priv = netdev_priv(ndev); struct nixge_hw_dma_bd *cur_p; struct nixge_tx_skb *tx_skb; - dma_addr_t tail_p; + dma_addr_t tail_p, cur_phys; skb_frag_t *frag; u32 num_frag; u32 ii; @@ -466,15 +516,16 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } - cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) + cur_phys = dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_phys)) goto drop; + nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; tx_skb->skb = NULL; - tx_skb->mapping = cur_p->phys; + tx_skb->mapping = cur_phys; tx_skb->size = skb_headlen(skb); tx_skb->mapped_as_page = false; @@ -485,16 +536,17 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) tx_skb = &priv->tx_skb[priv->tx_bd_tail]; frag = &skb_shinfo(skb)->frags[ii]; - cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) + cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_phys)) goto frag_err; + nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = skb_frag_size(frag); tx_skb->skb = NULL; - tx_skb->mapping = cur_p->phys; + tx_skb->mapping = cur_phys; tx_skb->size = skb_frag_size(frag); tx_skb->mapped_as_page = true; } @@ -506,7 +558,7 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail; /* Start the transfer */ - nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p); ++priv->tx_bd_tail; priv->tx_bd_tail %= TX_BD_NUM; @@ -537,7 +589,7 @@ static int nixge_recv(struct net_device *ndev, int budget) struct nixge_priv *priv = netdev_priv(ndev); struct sk_buff *skb, *new_skb; struct nixge_hw_dma_bd *cur_p; - dma_addr_t tail_p = 0; + dma_addr_t tail_p = 0, cur_phys = 0; u32 packets = 0; u32 length = 0; u32 size = 0; @@ -549,13 +601,15 @@ static int nixge_recv(struct net_device *ndev, int budget) tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) * priv->rx_bd_ci; - skb = (struct sk_buff *)(cur_p->sw_id_offset); + skb = (struct sk_buff *)(uintptr_t) + nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset); length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; if (length > NIXGE_MAX_JUMBO_FRAME_SIZE) length = NIXGE_MAX_JUMBO_FRAME_SIZE; - dma_unmap_single(ndev->dev.parent, cur_p->phys, + dma_unmap_single(ndev->dev.parent, + nixge_hw_dma_bd_get_addr(cur_p, phys), NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); @@ -579,16 +633,17 @@ static int nixge_recv(struct net_device *ndev, int budget) if (!new_skb) return packets; - cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, - NIXGE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) { + cur_phys = 
dma_map_single(ndev->dev.parent, new_skb->data, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_phys)) { /* FIXME: bail out and clean up */ netdev_err(ndev, "Failed to map ...\n"); } + nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; cur_p->status = 0; - cur_p->sw_id_offset = (u32)new_skb; + nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb); ++priv->rx_bd_ci; priv->rx_bd_ci %= RX_BD_NUM; @@ -599,7 +654,7 @@ static int nixge_recv(struct net_device *ndev, int budget) ndev->stats.rx_bytes += size; if (tail_p) - nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p); return packets; } @@ -637,6 +692,7 @@ static irqreturn_t nixge_tx_irq(int irq, void *_ndev) struct nixge_priv *priv = netdev_priv(_ndev); struct net_device *ndev = _ndev; unsigned int status; + dma_addr_t phys; u32 cr; status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET); @@ -650,9 +706,11 @@ static irqreturn_t nixge_tx_irq(int irq, void *_ndev) return IRQ_NONE; } if (status & XAXIDMA_IRQ_ERROR_MASK) { + phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci], + phys); + netdev_err(ndev, "DMA Tx error 0x%x\n", status); - netdev_err(ndev, "Current BD is at: 0x%x\n", - (priv->tx_bd_v[priv->tx_bd_ci]).phys); + netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ @@ -678,6 +736,7 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev) struct nixge_priv *priv = netdev_priv(_ndev); struct net_device *ndev = _ndev; unsigned int status; + dma_addr_t phys; u32 cr; status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET); @@ -697,9 +756,10 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev) return IRQ_NONE; } if (status & XAXIDMA_IRQ_ERROR_MASK) { + phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci], + phys); netdev_err(ndev, "DMA Rx error 0x%x\n", status); - netdev_err(ndev, "Current BD is at: 0x%x\n", - (priv->rx_bd_v[priv->rx_bd_ci]).phys); + netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ @@ -735,10 +795,10 @@ static void nixge_dma_err_handler(unsigned long data) tx_skb = &lp->tx_skb[i]; nixge_tx_skb_unmap(lp, tx_skb); - cur_p->phys = 0; + nixge_hw_dma_bd_set_phys(cur_p, 0); cur_p->cntrl = 0; cur_p->status = 0; - cur_p->sw_id_offset = 0; + nixge_hw_dma_bd_set_offset(cur_p, 0); } for (i = 0; i < RX_BD_NUM; i++) { @@ -779,18 +839,18 @@ static void nixge_dma_err_handler(unsigned long data) /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. */ - nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); + nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET); nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); - nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + + nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. 
But only after we write to the * tail pointer register that the Tx channel will start transmitting */ - nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); + nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET); nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 08381ef8bdb4..25382f8fbb70 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -19,34 +19,18 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/crc32.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h> -#include <linux/ethtool.h> -#include <linux/mii.h> #include <linux/clk.h> -#include <linux/workqueue.h> -#include <linux/netdevice.h> +#include <linux/crc32.h> #include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/phy.h> -#include <linux/dma-mapping.h> -#include <linux/of.h> +#include <linux/module.h> #include <linux/of_net.h> -#include <linux/types.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> -#include <linux/io.h> #include <mach/board.h> -#include <mach/platform.h> #include <mach/hardware.h> +#include <mach/platform.h> #define MODNAME "lpc-eth" #define DRV_VERSION "1.00" @@ -797,8 +781,7 @@ static int lpc_mii_probe(struct net_device *ndev) return PTR_ERR(phydev); } - /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); phydev->advertising = phydev->supported; @@ -1258,18 +1241,19 @@ static const struct net_device_ops lpc_netdev_ops = { static int lpc_eth_drv_probe(struct platform_device *pdev) { - struct resource *res; - struct net_device *ndev; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; struct netdata_local *pldat; - struct phy_device *phydev; + struct net_device *ndev; dma_addr_t dma_handle; + struct resource *res; int irq, ret; u32 tmp; /* Setup network interface for RMII or MII mode */ tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL); tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK; - if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII) + if (lpc_phy_interface_mode(dev) == PHY_INTERFACE_MODE_MII) tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS; else tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS; @@ -1279,7 +1263,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || irq < 0) { - dev_err(&pdev->dev, "error getting resources.\n"); + dev_err(dev, "error getting resources.\n"); ret = -ENXIO; goto err_exit; } @@ -1287,12 +1271,12 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) /* Allocate net driver data structure */ ndev = alloc_etherdev(sizeof(struct netdata_local)); if (!ndev) { - dev_err(&pdev->dev, "could not allocate device.\n"); + dev_err(dev, "could not allocate device.\n"); ret = -ENOMEM; goto err_exit; } - SET_NETDEV_DEV(ndev, &pdev->dev); + SET_NETDEV_DEV(ndev, dev); pldat = netdev_priv(ndev); pldat->pdev = pdev; @@ -1304,9 +1288,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) ndev->irq = irq; /* Get clock for the device */ - pldat->clk = 
clk_get(&pdev->dev, NULL); + pldat->clk = clk_get(dev, NULL); if (IS_ERR(pldat->clk)) { - dev_err(&pdev->dev, "error getting clock.\n"); + dev_err(dev, "error getting clock.\n"); ret = PTR_ERR(pldat->clk); goto err_out_free_dev; } @@ -1319,14 +1303,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) /* Map IO space */ pldat->net_base = ioremap(res->start, resource_size(res)); if (!pldat->net_base) { - dev_err(&pdev->dev, "failed to map registers\n"); + dev_err(dev, "failed to map registers\n"); ret = -ENOMEM; goto err_out_disable_clocks; } ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0, ndev->name, ndev); if (ret) { - dev_err(&pdev->dev, "error requesting interrupt.\n"); + dev_err(dev, "error requesting interrupt.\n"); goto err_out_iounmap; } @@ -1340,7 +1324,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t)); pldat->dma_buff_base_v = 0; - if (use_iram_for_net(&pldat->pdev->dev)) { + if (use_iram_for_net(dev)) { dma_handle = LPC32XX_IRAM_BASE; if (pldat->dma_buff_size <= lpc32xx_return_iram_size()) pldat->dma_buff_base_v = @@ -1351,7 +1335,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) } if (pldat->dma_buff_base_v == 0) { - ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) goto err_out_free_irq; @@ -1360,7 +1344,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) /* Allocate a chunk of memory for the DMA ethernet buffers and descriptors */ pldat->dma_buff_base_v = - dma_alloc_coherent(&pldat->pdev->dev, + dma_alloc_coherent(dev, pldat->dma_buff_size, &dma_handle, GFP_KERNEL); if (pldat->dma_buff_base_v == NULL) { @@ -1385,7 +1369,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) __lpc_get_mac(pldat, ndev->dev_addr); if (!is_valid_ether_addr(ndev->dev_addr)) { - const char *macaddr = of_get_mac_address(pdev->dev.of_node); + const char *macaddr = of_get_mac_address(np); if (macaddr) memcpy(ndev->dev_addr, macaddr, ETH_ALEN); } @@ -1415,7 +1399,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + dev_err(dev, "Cannot register net device, aborting.\n"); goto err_out_dma_unmap; } platform_set_drvdata(pdev, ndev); @@ -1427,19 +1411,17 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", res->start, ndev->irq); - phydev = ndev->phydev; - - device_init_wakeup(&pdev->dev, 1); - device_set_wakeup_enable(&pdev->dev, 0); + device_init_wakeup(dev, 1); + device_set_wakeup_enable(dev, 0); return 0; err_out_unregister_netdev: unregister_netdev(ndev); err_out_dma_unmap: - if (!use_iram_for_net(&pldat->pdev->dev) || + if (!use_iram_for_net(dev) || pldat->dma_buff_size > lpc32xx_return_iram_size()) - dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size, + dma_free_coherent(dev, pldat->dma_buff_size, pldat->dma_buff_base_v, pldat->dma_buff_base_p); err_out_free_irq: @@ -1534,13 +1516,11 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) } #endif -#ifdef CONFIG_OF static const struct of_device_id lpc_eth_match[] = { { .compatible = "nxp,lpc-eth" }, { } }; MODULE_DEVICE_TABLE(of, lpc_eth_match); -#endif static struct platform_driver lpc_eth_driver = { .probe = lpc_eth_drv_probe, @@ -1551,7 +1531,7 @@ static struct platform_driver lpc_eth_driver = { #endif .driver = { .name = MODNAME, - .of_match_table = 
of_match_ptr(lpc_eth_match), + .of_match_table = lpc_eth_match, }, }; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 59c70be22a84..7d9819d80e44 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1784,11 +1784,6 @@ static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev) return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } -static void netxen_io_resume(struct pci_dev *pdev) -{ - pci_cleanup_aer_uncorrect_error_status(pdev); -} - static void netxen_nic_shutdown(struct pci_dev *pdev) { struct netxen_adapter *adapter = pci_get_drvdata(pdev); @@ -3465,7 +3460,6 @@ netxen_free_ip_list(struct netxen_adapter *adapter, bool master) static const struct pci_error_handlers netxen_err_handler = { .error_detected = netxen_io_error_detected, .slot_reset = netxen_io_slot_reset, - .resume = netxen_io_resume, }; static struct pci_driver netxen_driver = { diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a60e1c8d470a..d9a03aba0e02 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -623,6 +623,7 @@ struct qed_hwfn { void *unzip_buf; struct dbg_tools_data dbg_info; + void *dbg_user_info; /* PWM region specific data */ u16 wid_count; @@ -914,7 +915,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); /* Prototypes */ int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info); -void qed_link_update(struct qed_hwfn *hwfn); +void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt); u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index f1977aa440e5..dc1c1b616084 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -40,7 +40,6 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/bitops.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f5459de6d60a..8e8fa823d611 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -262,8 +262,9 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; - DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n", - app_prio_bitmap); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); return false; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 1aa9fc1c5890..78a638ec7c0a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -3454,7 +3454,8 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id); - buf[strlen(buf) - 1] = '0' + set_id; + if (strlen(buf) > 0) + buf[strlen(buf) - 1] = '0' + set_id; offset += qed_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, @@ -5563,35 +5564,6 @@ struct block_info { enum block_id id; }; -struct mcp_trace_format { - u32 data; -#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff -#define 
MCP_TRACE_FORMAT_MODULE_SHIFT 0 -#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 -#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 -#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 -#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 -#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 -#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 -#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 -#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 -#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 -#define MCP_TRACE_FORMAT_LEN_SHIFT 24 - - char *format_str; -}; - -/* Meta data structure, generated by a perl script during MFW build. therefore, - * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl - * script. - */ -struct mcp_trace_meta { - u32 modules_num; - char **modules; - u32 formats_num; - struct mcp_trace_format *formats; -}; - /* REG fifo element */ struct reg_fifo_element { u64 data; @@ -5714,6 +5686,20 @@ struct igu_fifo_addr_data { enum igu_fifo_addr_types type; }; +struct mcp_trace_meta { + u32 modules_num; + char **modules; + u32 formats_num; + struct mcp_trace_format *formats; + bool is_allocated; +}; + +/* Debug Tools user data */ +struct dbg_tools_user_data { + struct mcp_trace_meta mcp_trace_meta; + const u32 *mcp_trace_user_meta_buf; +}; + /******************************** Constants **********************************/ #define MAX_MSG_LEN 1024 @@ -6137,15 +6123,6 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { /******************************** Variables **********************************/ -/* MCP Trace meta data array - used in case the dump doesn't contain the - * meta data (e.g. due to no NVRAM access). - */ -static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 }; - -/* Parsed MCP Trace meta data info, based on MCP trace meta array */ -static struct mcp_trace_meta s_mcp_trace_meta; -static bool s_mcp_trace_meta_valid; - /* Temporary buffer, used for print size calculations */ static char s_temp_buf[MAX_MSG_LEN]; @@ -6311,6 +6288,12 @@ static u32 qed_print_section_params(u32 *dump_buf, return dump_offset; } +static struct dbg_tools_user_data * +qed_dbg_get_user_data(struct qed_hwfn *p_hwfn) +{ + return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info; +} + /* Parses the idle check rules and returns the number of characters printed. * In case of parsing error, returns 0. */ @@ -6570,43 +6553,26 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, return DBG_STATUS_OK; } -/* Frees the specified MCP Trace meta data */ -static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn, - struct mcp_trace_meta *meta) -{ - u32 i; - - s_mcp_trace_meta_valid = false; - - /* Release modules */ - if (meta->modules) { - for (i = 0; i < meta->modules_num; i++) - kfree(meta->modules[i]); - kfree(meta->modules); - } - - /* Release formats */ - if (meta->formats) { - for (i = 0; i < meta->formats_num; i++) - kfree(meta->formats[i].format_str); - kfree(meta->formats); - } -} - /* Allocates and fills MCP Trace meta data based on the specified meta data * dump buffer. * Returns debug status code. 
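Each entry in the MCP trace meta data packs its module index, log level, three parameter sizes and record length into the single 32-bit data word of struct mcp_trace_format, using the MCP_TRACE_FORMAT_* masks and shifts shown here (and re-declared in qed_hsi.h further down). A small self-contained sketch of decoding such a word, with the mask values copied from the diff but otherwise hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Masks/shifts copied from the mcp_trace_format definition in the diff. */
#define FMT_MODULE_MASK		0x0000ffffu
#define FMT_MODULE_SHIFT	0
#define FMT_LEVEL_MASK		0x00030000u
#define FMT_LEVEL_SHIFT		16
#define FMT_P1_SIZE_MASK	0x000c0000u
#define FMT_P1_SIZE_SHIFT	18
#define FMT_P2_SIZE_MASK	0x00300000u
#define FMT_P2_SIZE_SHIFT	20
#define FMT_P3_SIZE_MASK	0x00c00000u
#define FMT_P3_SIZE_SHIFT	22
#define FMT_LEN_MASK		0xff000000u
#define FMT_LEN_SHIFT		24

/* Extract one field from the packed format word. */
#define FMT_GET(word, field) \
	(((word) & FMT_##field##_MASK) >> FMT_##field##_SHIFT)

int main(void)
{
	uint32_t data = 0x1a2c0005;	/* example packed format word */

	printf("module=%u level=%u p1=%u p2=%u p3=%u len=%u\n",
	       (unsigned)FMT_GET(data, MODULE), (unsigned)FMT_GET(data, LEVEL),
	       (unsigned)FMT_GET(data, P1_SIZE), (unsigned)FMT_GET(data, P2_SIZE),
	       (unsigned)FMT_GET(data, P3_SIZE), (unsigned)FMT_GET(data, LEN));
	return 0;
}

The parser in qed_parse_mcp_trace_buf() appears to walk the same fields, stepping param_mask/param_shift across the P1..P3 size bits for each message it prints.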
*/ -static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, - const u32 *meta_buf, - struct mcp_trace_meta *meta) +static enum dbg_status +qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf) { - u8 *meta_buf_bytes = (u8 *)meta_buf; + struct dbg_tools_user_data *dev_user_data; u32 offset = 0, signature, i; + struct mcp_trace_meta *meta; + u8 *meta_buf_bytes; + + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + meta_buf_bytes = (u8 *)meta_buf; /* Free the previous meta before loading a new one. */ - if (s_mcp_trace_meta_valid) - qed_mcp_trace_free_meta(p_hwfn, meta); + if (meta->is_allocated) + qed_mcp_trace_free_meta_data(p_hwfn); memset(meta, 0, sizeof(*meta)); @@ -6674,7 +6640,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, format_len, format_ptr->format_str); } - s_mcp_trace_meta_valid = true; + meta->is_allocated = true; return DBG_STATUS_OK; } @@ -6687,21 +6653,26 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, * buffer. * data_size - size in bytes of data to parse. * parsed_buf - destination buffer for parsed data. - * parsed_bytes - size of parsed data in bytes. + * parsed_results_bytes - size of parsed data in bytes. */ -static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, +static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn, + u8 *trace_buf, u32 trace_buf_size, u32 data_offset, u32 data_size, char *parsed_buf, - u32 *parsed_bytes) + u32 *parsed_results_bytes) { + struct dbg_tools_user_data *dev_user_data; + struct mcp_trace_meta *meta; u32 param_mask, param_shift; enum dbg_status status; - *parsed_bytes = 0; + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + *parsed_results_bytes = 0; - if (!s_mcp_trace_meta_valid) + if (!meta->is_allocated) return DBG_STATUS_MCP_TRACE_BAD_DATA; status = DBG_STATUS_OK; @@ -6723,7 +6694,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, format_idx = header & MFW_TRACE_EVENTID_MASK; /* Skip message if its index doesn't exist in the meta data */ - if (format_idx >= s_mcp_trace_meta.formats_num) { + if (format_idx >= meta->formats_num) { u8 format_size = (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> MFW_TRACE_PRM_SIZE_SHIFT); @@ -6738,7 +6709,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, continue; } - format_ptr = &s_mcp_trace_meta.formats[format_idx]; + format_ptr = &meta->formats[format_idx]; for (i = 0, param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, @@ -6783,19 +6754,20 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, return DBG_STATUS_MCP_TRACE_BAD_DATA; /* Print current message to results buffer */ - *parsed_bytes += - sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), + *parsed_results_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, + *parsed_results_bytes), "%s %-8s: ", s_mcp_trace_level_str[format_level], - s_mcp_trace_meta.modules[format_module]); - *parsed_bytes += - sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), + meta->modules[format_module]); + *parsed_results_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes), format_ptr->format_str, params[0], params[1], params[2]); } /* Add string NULL terminator */ - (*parsed_bytes)++; + (*parsed_results_bytes)++; return status; } @@ -6803,24 +6775,25 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, /* Parses an MCP Trace dump buffer. * If result_buf is not NULL, the MCP Trace results are printed to it. 
* In any case, the required results buffer size is assigned to - * parsed_bytes. + * parsed_results_bytes. * The parsing status is returned. */ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, - char *parsed_buf, - u32 *parsed_bytes) + char *results_buf, + u32 *parsed_results_bytes, + bool free_meta_data) { const char *section_name, *param_name, *param_str_val; u32 data_size, trace_data_dwords, trace_meta_dwords; - u32 offset, results_offset, parsed_buf_bytes; + u32 offset, results_offset, results_buf_bytes; u32 param_num_val, num_section_params; struct mcp_trace *trace; enum dbg_status status; const u32 *meta_buf; u8 *trace_buf; - *parsed_bytes = 0; + *parsed_results_bytes = 0; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6831,7 +6804,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, - parsed_buf, &results_offset); + results_buf, &results_offset); /* Read trace_data section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6846,6 +6819,9 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Prepare trace info */ trace = (struct mcp_trace *)dump_buf; + if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + trace_buf = (u8 *)dump_buf + sizeof(*trace); offset = trace->trace_oldest; data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size); @@ -6865,31 +6841,39 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Choose meta data buffer */ if (!trace_meta_dwords) { /* Dump doesn't include meta data */ - if (!s_mcp_trace_meta_arr.ptr) + struct dbg_tools_user_data *dev_user_data = + qed_dbg_get_user_data(p_hwfn); + + if (!dev_user_data->mcp_trace_user_meta_buf) return DBG_STATUS_MCP_TRACE_NO_META; - meta_buf = s_mcp_trace_meta_arr.ptr; + + meta_buf = dev_user_data->mcp_trace_user_meta_buf; } else { /* Dump includes meta data */ meta_buf = dump_buf; } /* Allocate meta data memory */ - status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta); + status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf); if (status != DBG_STATUS_OK) return status; - status = qed_parse_mcp_trace_buf(trace_buf, + status = qed_parse_mcp_trace_buf(p_hwfn, + trace_buf, trace->size, offset, data_size, - parsed_buf ? - parsed_buf + results_offset : + results_buf ? 
+ results_buf + results_offset : NULL, - &parsed_buf_bytes); + &results_buf_bytes); if (status != DBG_STATUS_OK) return status; - *parsed_bytes = results_offset + parsed_buf_bytes; + if (free_meta_data) + qed_mcp_trace_free_meta_data(p_hwfn); + + *parsed_results_bytes = results_offset + results_buf_bytes; return DBG_STATUS_OK; } @@ -7361,6 +7345,16 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) return DBG_STATUS_OK; } +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn) +{ + p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data), + GFP_KERNEL); + if (!p_hwfn->dbg_user_info) + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + + return DBG_STATUS_OK; +} + const char *qed_dbg_get_status_str(enum dbg_status status) { return (status < @@ -7397,10 +7391,13 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, num_errors, num_warnings); } -void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size) +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf) { - s_mcp_trace_meta_arr.ptr = data; - s_mcp_trace_meta_arr.size_in_dwords = size; + struct dbg_tools_user_data *dev_user_data = + qed_dbg_get_user_data(p_hwfn); + + dev_user_data->mcp_trace_user_meta_buf = meta_buf; } enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, @@ -7409,7 +7406,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size) { return qed_parse_mcp_trace_dump(p_hwfn, - dump_buf, NULL, results_buf_size); + dump_buf, NULL, results_buf_size, true); } enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, @@ -7421,20 +7418,61 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, - results_buf, &parsed_buf_size); + results_buf, &parsed_buf_size, true); +} + +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, + &parsed_buf_size, false); } -enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, u32 num_dumped_bytes, char *results_buf) { - u32 parsed_bytes; + u32 parsed_results_bytes; - return qed_parse_mcp_trace_buf(dump_buf, + return qed_parse_mcp_trace_buf(p_hwfn, + dump_buf, num_dumped_bytes, 0, num_dumped_bytes, - results_buf, &parsed_bytes); + results_buf, &parsed_results_bytes); +} + +/* Frees the specified MCP Trace meta data */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn) +{ + struct dbg_tools_user_data *dev_user_data; + struct mcp_trace_meta *meta; + u32 i; + + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + if (!meta->is_allocated) + return; + + /* Release modules */ + if (meta->modules) { + for (i = 0; i < meta->modules_num; i++) + kfree(meta->modules[i]); + kfree(meta->modules); + } + + /* Release formats */ + if (meta->formats) { + for (i = 0; i < meta->formats_num; i++) + kfree(meta->formats[i].format_str); + kfree(meta->formats); + } + + meta->is_allocated = false; } enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 97f073fd3725..7ceb2b97538d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -144,6 +144,12 @@ static 
void qed_qm_info_free(struct qed_hwfn *p_hwfn) qm_info->wfq_data = NULL; } +static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->dbg_user_info); + p_hwfn->dbg_user_info = NULL; +} + void qed_resc_free(struct qed_dev *cdev) { int i; @@ -183,6 +189,7 @@ void qed_resc_free(struct qed_dev *cdev) qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); + qed_dbg_user_data_free(p_hwfn); } } @@ -1083,6 +1090,10 @@ int qed_resc_alloc(struct qed_dev *cdev) rc = qed_dcbx_info_alloc(p_hwfn); if (rc) goto alloc_err; + + rc = qed_dbg_alloc_user_data(p_hwfn); + if (rc) + goto alloc_err; } cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); @@ -2668,6 +2679,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: link->speed.forced_speed = 10000; break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_20G: + link->speed.forced_speed = 20000; + break; case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: link->speed.forced_speed = 25000; break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index a71382687ef2..5c221ebaa7b3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -274,7 +274,8 @@ struct core_rx_start_ramrod_data { u8 mf_si_mcast_accept_all; struct core_rx_action_on_error action_on_error; u8 gsi_offload_flag; - u8 reserved[6]; + u8 wipe_inner_vlan_pri_en; + u8 reserved[5]; }; /* Ramrod data for rx queue stop ramrod */ @@ -351,7 +352,8 @@ struct core_tx_start_ramrod_data { __le16 pbl_size; __le16 qm_pq_id; u8 gsi_offload_flag; - u8 resrved[3]; + u8 vport_id; + u8 resrved[2]; }; /* Ramrod data for tx queue stop ramrod */ @@ -914,6 +916,16 @@ struct eth_rx_rate_limit { __le16 reserved1; }; +/* Update RSS indirection table entry command */ +struct eth_tstorm_rss_update_data { + u8 valid; + u8 vport_id; + u8 ind_table_index; + u8 reserved; + __le16 ind_table_value; + __le16 reserved1; +}; + struct eth_ustorm_per_pf_stat { struct regpair rcv_lb_ucast_bytes; struct regpair rcv_lb_mcast_bytes; @@ -1241,6 +1253,10 @@ struct rl_update_ramrod_data { u8 rl_id_first; u8 rl_id_last; u8 rl_dc_qcn_flg; + u8 dcqcn_reset_alpha_on_idle; + u8 rl_bc_stage_th; + u8 rl_timer_stage_th; + u8 reserved1; __le32 rl_bc_rate; __le16 rl_max_rate; __le16 rl_r_ai; @@ -1249,7 +1265,7 @@ struct rl_update_ramrod_data { __le32 dcqcn_k_us; __le32 dcqcn_timeuot_us; __le32 qcn_timeuot_us; - __le32 reserved[2]; + __le32 reserved2; }; /* Slowpath Element (SPQE) */ @@ -3322,6 +3338,25 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); +/******************************* Data Types **********************************/ + +struct mcp_trace_format { + u32 data; +#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff +#define MCP_TRACE_FORMAT_MODULE_SHIFT 0 +#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 +#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 +#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 +#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 +#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 +#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 +#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 +#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 +#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 +#define MCP_TRACE_FORMAT_LEN_SHIFT 24 + char *format_str; +}; + /******************************** Constants **********************************/ #define MAX_NAME_LEN 16 @@ -3337,6 
+3372,13 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr); /** + * @brief qed_dbg_alloc_user_data - Allocates user debug data. + * + * @param p_hwfn - HW device data + */ +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn); + +/** * @brief qed_dbg_get_status_str - Returns a string for the specified status. * * @param status - a debug status code. @@ -3381,8 +3423,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *num_warnings); /** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace - * meta data. + * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. * * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to * no NVRAM access). @@ -3390,7 +3431,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, * @param data - pointer to MCP Trace meta data * @param size - size of MCP Trace meta data in dwords */ -void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size); +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf); /** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size @@ -3425,19 +3467,45 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, char *results_buf); /** + * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and + * keeps the MCP trace meta data allocated, to support continuous MCP Trace + * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should + * be called to free the meta data. + * + * @param p_hwfn - HW device data + * @param dump_buf - mcp trace dump buffer, starting from the header. + * @param results_buf - buffer for printing the mcp trace results. + * + * @return error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf); + +/** * @brief print_mcp_trace_line - Prints MCP Trace results for a single line * + * @param p_hwfn - HW device data * @param dump_buf - mcp trace dump buffer, starting from the header. * @param num_dumped_bytes - number of bytes that were dumped. * @param results_buf - buffer for printing the mcp trace results. * * @return error if the parsing fails, ok otherwise. */ -enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, u32 num_dumped_bytes, char *results_buf); /** + * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. + * Should be called after continuous MCP Trace parsing. + * + * @param p_hwfn - HW device data + */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + +/** * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * for reg_fifo results (in bytes). * @@ -4303,154 +4371,161 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, (IRO[29].base + ((pf_id) * IRO[29].m1)) #define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) +/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. 
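The storm RAM offsets in this header all follow one addressing pattern: a base plus a per-instance stride taken from the firmware-provided IRO array, as in the TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) macro defined just below and the renumbered IRO[31..55] macros that follow. A toy sketch of that scheme, with made-up table values purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Simplified IRO entry: base offset, stride per first index, element size.
 * The values below are invented for the example, not real firmware data. */
struct demo_iro {
	uint32_t base;
	uint32_t m1;
	uint32_t size;
};

static const struct demo_iro iro[] = {
	[30] = { .base = 0x4000, .m1 = 0x8, .size = 0x8 },
};

/* Mirrors the shape of TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) in the diff. */
#define DEMO_RSS_UPDATE_OFFSET(pf_id) \
	(iro[30].base + ((pf_id) * iro[30].m1))

int main(void)
{
	for (unsigned int pf = 0; pf < 4; pf++)
		printf("pf %u -> offset 0x%x (size 0x%x)\n",
		       pf, (unsigned)DEMO_RSS_UPDATE_OFFSET(pf),
		       (unsigned)iro[30].size);
	return 0;
}

The renumbering in the rest of this hunk simply shifts each existing macro up by one IRO slot to make room for the new IRO[30] RSS-update entry.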
+ */ +#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ + (IRO[30].base + ((pf_id) * IRO[30].m1)) +#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size) + /* Xstorm queue zone */ #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[30].base + ((queue_id) * IRO[30].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) + (IRO[31].base + ((queue_id) * IRO[31].m1)) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size) /* Ystorm cqe producer */ #define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[31].base + ((rss_id) * IRO[31].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size) + (IRO[32].base + ((rss_id) * IRO[32].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size) /* Ustorm cqe producer */ #define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[32].base + ((rss_id) * IRO[32].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size) + (IRO[33].base + ((rss_id) * IRO[33].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size) /* Ustorm grq producer */ #define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size) + (IRO[34].base + ((pf_id) * IRO[34].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size) /* Tstorm cmdq-cons of given command queue-id */ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size) /* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, * BDqueue-id. */ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) /* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size) /* Tstorm iSCSI RX stats */ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) /* Mstorm iSCSI RX stats */ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) /* Ustorm iSCSI RX stats */ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size) /* Xstorm iSCSI TX stats */ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) /* Ystorm iSCSI TX stats */ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + (IRO[42].base + ((pf_id) * IRO[42].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) /* 
Pstorm iSCSI TX stats */ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[42].base + ((pf_id) * IRO[42].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) + (IRO[43].base + ((pf_id) * IRO[43].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size) /* Tstorm FCoE RX stats */ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[43].base + ((pf_id) * IRO[43].m1)) -#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size) + (IRO[44].base + ((pf_id) * IRO[44].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size) /* Pstorm FCoE TX stats */ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[44].base + ((pf_id) * IRO[44].m1)) -#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size) + (IRO[45].base + ((pf_id) * IRO[45].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size) /* Pstorm RDMA queue statistics */ #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) /* Tstorm RDMA queue statistics */ #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size) /* Xstorm error level for assert */ #define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) -#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + (IRO[48].base + ((pf_id) * IRO[48].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) /* Ystorm error level for assert */ #define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[48].base + ((pf_id) * IRO[48].m1)) -#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + (IRO[49].base + ((pf_id) * IRO[49].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) /* Pstorm error level for assert */ #define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[49].base + ((pf_id) * IRO[49].m1)) -#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + (IRO[50].base + ((pf_id) * IRO[50].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) /* Tstorm error level for assert */ #define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[50].base + ((pf_id) * IRO[50].m1)) -#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + (IRO[51].base + ((pf_id) * IRO[51].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) /* Mstorm error level for assert */ #define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) -#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + (IRO[52].base + ((pf_id) * IRO[52].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) /* Ustorm error level for assert */ #define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) -#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size) /* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[53].base + ((pf_id) * IRO[53].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) + (IRO[54].base + ((pf_id) * IRO[54].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size) /* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) + (IRO[55].base + ((roce_pf_id) * 
IRO[55].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size) /* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size) /* RoCE Error Statistics */ #define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) -#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size) /* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size) /* RoCE CQEs Statistics */ #define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) -#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + (IRO[59].base + ((roce_pf_id) * IRO[59].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size) -static const struct iro iro_arr[59] = { +static const struct iro iro_arr[60] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@ -4461,14 +4536,14 @@ static const struct iro iro_arr[59] = { {0x84, 0x8, 0x0, 0x0, 0x2}, {0x4c48, 0x0, 0x0, 0x0, 0x78}, {0x3e38, 0x0, 0x0, 0x0, 0x78}, - {0x2b78, 0x0, 0x0, 0x0, 0x78}, + {0x3ef8, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x4998, 0x0, 0x0, 0x0, 0x78}, {0x7f50, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, {0x6210, 0x10, 0x0, 0x0, 0x10}, {0xb820, 0x30, 0x0, 0x0, 0x30}, - {0x96c0, 0x30, 0x0, 0x0, 0x30}, + {0xa990, 0x30, 0x0, 0x0, 0x30}, {0x4b68, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x53a8, 0x80, 0x4, 0x0, 0x4}, @@ -4476,11 +4551,12 @@ static const struct iro iro_arr[59] = { {0x4ba8, 0x80, 0x0, 0x0, 0x20}, {0x8158, 0x40, 0x0, 0x0, 0x30}, {0xe770, 0x60, 0x0, 0x0, 0x60}, - {0x2d10, 0x80, 0x0, 0x0, 0x38}, - {0xf2b8, 0x78, 0x0, 0x0, 0x78}, + {0x4090, 0x80, 0x0, 0x0, 0x38}, + {0xfea8, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0xaf20, 0x0, 0x0, 0x0, 0xf0}, {0xb010, 0x8, 0x0, 0x0, 0x8}, + {0xc00, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, @@ -4492,23 +4568,23 @@ static const struct iro iro_arr[59] = { {0x12908, 0x18, 0x0, 0x0, 0x10}, {0x11aa8, 0x40, 0x0, 0x0, 0x18}, {0xa588, 0x50, 0x0, 0x0, 0x20}, - {0x8700, 0x40, 0x0, 0x0, 0x28}, - {0x10300, 0x18, 0x0, 0x0, 0x10}, + {0x8f00, 0x40, 0x0, 0x0, 0x28}, + {0x10e30, 0x18, 0x0, 0x0, 0x10}, {0xde48, 0x48, 0x0, 0x0, 0x38}, - {0x10768, 0x20, 0x0, 0x0, 0x20}, - {0x2d48, 0x80, 0x0, 0x0, 0x10}, + {0x11298, 0x20, 0x0, 0x0, 0x20}, + {0x40c8, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, {0xc748, 0x8, 0x0, 0x0, 0x1}, - {0xa128, 0x8, 0x0, 0x0, 0x1}, - {0x10f00, 0x8, 0x0, 0x0, 0x1}, + {0xa928, 0x8, 0x0, 0x0, 0x1}, + {0x11a30, 0x8, 0x0, 0x0, 0x1}, {0xf030, 0x8, 0x0, 0x0, 0x1}, {0x13028, 0x8, 0x0, 0x0, 0x1}, {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, {0xed90, 0x28, 0x0, 0x0, 0x28}, - {0xa520, 0x18, 0x0, 0x0, 0x18}, - {0xa6a0, 0x8, 0x0, 0x0, 0x8}, - {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0xad20, 0x18, 0x0, 0x0, 0x18}, + {0xaea0, 0x8, 0x0, 0x0, 0x8}, + {0x13c38, 0x8, 0x0, 0x0, 0x8}, {0x13c50, 0x18, 0x0, 0x0, 0x18}, }; @@ -5661,6 +5737,14 @@ enum 
eth_filter_type { MAX_ETH_FILTER_TYPE }; +/* inner to inner vlan priority translation configurations */ +struct eth_in_to_in_pri_map_cfg { + u8 inner_vlan_pri_remap_en; + u8 reserved[7]; + u8 non_rdma_in_to_in_pri_map[8]; + u8 rdma_in_to_in_pri_map[8]; +}; + /* Eth IPv4 Fragment Type */ enum eth_ipv4_frag_type { ETH_IPV4_NOT_FRAG, @@ -6018,6 +6102,14 @@ struct tx_queue_update_ramrod_data { struct regpair reserved1[5]; }; +/* Inner to Inner VLAN priority map update mode */ +enum update_in_to_in_pri_map_mode_enum { + ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED, + ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL, + ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL, + MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM +}; + /* Ramrod data for vport update ramrod */ struct vport_filter_update_ramrod_data { struct eth_filter_cmd_header filter_cmd_hdr; @@ -6048,7 +6140,8 @@ struct vport_start_ramrod_data { u8 zero_placement_offset; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[1]; + u8 wipe_inner_vlan_pri_en; + struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg; }; /* Ramrod data for vport stop ramrod */ @@ -6100,7 +6193,9 @@ struct vport_update_ramrod_data_cmn { u8 update_ctl_frame_checks_en_flg; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[15]; + u8 update_in_to_in_pri_map_mode; + u8 in_to_in_pri_map[8]; + u8 reserved[6]; }; struct vport_update_ramrod_mcast { @@ -6929,11 +7024,6 @@ struct mstorm_rdma_task_st_ctx { struct regpair temp[4]; }; -/* The roce task context of Ustorm */ -struct ustorm_rdma_task_st_ctx { - struct regpair temp[2]; -}; - struct e4_ustorm_rdma_task_ag_ctx { u8 reserved; u8 state; @@ -7007,8 +7097,6 @@ struct e4_rdma_task_context { struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; struct mstorm_rdma_task_st_ctx mstorm_st_context; struct rdif_task_context rdif_context; - struct ustorm_rdma_task_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; }; @@ -7388,7 +7476,7 @@ struct e4_ustorm_rdma_conn_ag_ctx { #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; - u8 byte3; + u8 nvmf_only; __le16 conn_dpi; __le16 word1; __le32 cq_cons; @@ -7831,7 +7919,12 @@ struct roce_create_qp_req_ramrod_data { struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; u8 stats_counter_id; - u8 reserved3[7]; + u8 reserved3[6]; + u8 flags2; +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1 __le16 regular_latency_phy_queue; __le16 dpi; }; @@ -7954,6 +8047,7 @@ enum roce_event_opcode { ROCE_EVENT_DESTROY_QP, ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_DESTROY_UD_QP, + ROCE_EVENT_FUNC_UPDATE, MAX_ROCE_EVENT_OPCODE }; @@ -7962,7 +8056,13 @@ struct roce_init_func_params { u8 ll2_queue_id; u8 cnp_vlan_priority; u8 cnp_dscp; - u8 reserved; + u8 flags; +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2 __le32 cnp_send_timeout; __le16 rl_offset; u8 rl_count_log; @@ -8109,9 +8209,24 @@ enum roce_ramrod_cmd_id { ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_DESTROY_UD_QP, + ROCE_RAMROD_FUNC_UPDATE, 
MAX_ROCE_RAMROD_CMD_ID }; +/* RoCE func init ramrod data */ +struct roce_update_func_params { + u8 cnp_vlan_priority; + u8 cnp_dscp; + __le16 flags; +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2 + __le32 cnp_send_timeout; +}; + struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; @@ -12092,11 +12207,56 @@ struct public_port { u32 transceiver_data; #define ETH_TRANSCEIVER_STATE_MASK 0x000000FF #define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 #define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 #define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 #define ETH_TRANSCEIVER_STATE_VALID 0x00000003 #define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 - +#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00 +#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 +#define ETH_TRANSCEIVER_TYPE_NONE 0x00 +#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xFF +#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 +#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 +#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 +#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 +#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 +#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 +#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 +#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 +#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 +#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a +#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b +#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c +#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d +#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e +#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f +#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 +#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 +#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 +#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 +#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 +#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 +#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 +#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 +#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a +#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b +#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c +#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d +#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e +#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f +#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 +#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 u32 wol_info; u32 wol_pkt_len; u32 wol_pkt_details; @@ -12161,7 +12321,7 @@ struct public_func { #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 u32 status; -#define FUNC_STATUS_VLINK_DOWN 0x00000001 +#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 u32 mac_upper; #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff @@ -12583,6 +12743,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 @@ -12635,6 +12796,7 @@ struct public_drv_mb { /* get MFW feature support response */ #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) @@ -13040,6 +13202,7 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 @@ -13050,6 +13213,7 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 @@ -13080,6 +13244,13 @@ struct nvm_cfg1_port { u32 transceiver_00; u32 device_ids; u32 board_cfg; +#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF +#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 +#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 +#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 u32 mnm_10g_cap; u32 mnm_10g_ctrl; u32 mnm_10g_misc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index af3a28ec04eb..0f0aba793352 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == - QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)", + QED_GRC_ATTENTION_PRIV_VF) ? 
"VF" : "(Irrelevant)", GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); out: diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index e860bdf0f752..beb8e5d6401a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -935,9 +935,8 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) } spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); - list_del(&ep->list_entry); - list_add_tail(&ep->list_entry, - &p_hwfn->p_rdma_info->iwarp.ep_free_list); + list_move_tail(&ep->list_entry, + &p_hwfn->p_rdma_info->iwarp.ep_free_list); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } @@ -2270,8 +2269,8 @@ static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn) if (rc == -EBUSY) break; - list_del(&mpa_buf->list_entry); - list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list); + list_move_tail(&mpa_buf->list_entry, + &iwarp_info->mpa_buf_list); if (rc) { /* different error, don't continue */ DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 14ac9cab2653..aa633381aa47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -63,8 +63,8 @@ #include "qed_sp.h" #include "qed_rdma.h" -#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) -#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) +#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered) +#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered) #define QED_LL2_TX_SIZE (256) #define QED_LL2_RX_SIZE (4096) @@ -796,7 +796,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, tx_pkt.vlan = p_buffer->vlan; tx_pkt.bd_flags = bd_flags; tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; - tx_pkt.tx_dest = p_ll2_conn->tx_dest; + switch (p_ll2_conn->tx_dest) { + case CORE_TX_DEST_NW: + tx_pkt.tx_dest = QED_LL2_TX_DEST_NW; + break; + case CORE_TX_DEST_LB: + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + break; + case CORE_TX_DEST_DROP: + default: + tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; + break; + } tx_pkt.first_frag = first_frag; tx_pkt.first_frag_len = p_buffer->packet_length; tx_pkt.cookie = p_buffer; @@ -1404,7 +1415,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) &p_hwfn->p_ll2_info[i], &p_ll2_info->rx_queue.rx_sb_index, &p_ll2_info->rx_queue.p_fw_cons); - p_ll2_info->rx_queue.b_cb_registred = true; + p_ll2_info->rx_queue.b_cb_registered = true; } if (data->input.tx_num_desc) { @@ -1413,7 +1424,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) &p_hwfn->p_ll2_info[i], &p_ll2_info->tx_queue.tx_sb_index, &p_ll2_info->tx_queue.p_fw_cons); - p_ll2_info->tx_queue.b_cb_registred = true; + p_ll2_info->tx_queue.b_cb_registered = true; } *data->p_connection_handle = i; @@ -1929,7 +1940,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) /* Stop Tx & Rx of connection, if needed */ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->tx_queue.b_cb_registred = false; + p_ll2_conn->tx_queue.b_cb_registered = false; smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); if (rc) @@ -1940,7 +1951,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) } if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->rx_queue.b_cb_registred = false; + p_ll2_conn->rx_queue.b_cb_registered = 
false; smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); if (rc) diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index f65817012e97..1a5c1ae01474 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -79,7 +79,7 @@ struct qed_ll2_rx_queue { struct qed_chain rxq_chain; struct qed_chain rcq_chain; u8 rx_sb_index; - bool b_cb_registred; + bool b_cb_registered; __le16 *p_fw_cons; struct list_head active_descq; struct list_head free_descq; @@ -93,7 +93,7 @@ struct qed_ll2_tx_queue { spinlock_t lock; struct qed_chain txq_chain; u8 tx_sb_index; - bool b_cb_registred; + bool b_cb_registered; __le16 *p_fw_cons; struct list_head active_descq; struct list_head free_descq; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2094d86a7a08..35fd0db6a677 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -58,6 +58,7 @@ #include "qed_iscsi.h" #include "qed_mcp.h" +#include "qed_reg_addr.h" #include "qed_hw.h" #include "qed_selftest.h" #include "qed_debug.h" @@ -1304,6 +1305,7 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) struct qed_hwfn *hwfn; struct qed_mcp_link_params *link_params; struct qed_ptt *ptt; + u32 sup_caps; int rc; if (!cdev) @@ -1330,23 +1332,50 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) link_params->speed.autoneg = params->autoneg; if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { link_params->speed.advertised_speeds = 0; - if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) || - (params->adv_speeds & QED_LM_1000baseT_Full_BIT)) + sup_caps = QED_LM_1000baseT_Full_BIT | + QED_LM_1000baseKX_Full_BIT | + QED_LM_1000baseX_Full_BIT; + if (params->adv_speeds & sup_caps) link_params->speed.advertised_speeds |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; - if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT) + sup_caps = QED_LM_10000baseT_Full_BIT | + QED_LM_10000baseKR_Full_BIT | + QED_LM_10000baseKX4_Full_BIT | + QED_LM_10000baseR_FEC_BIT | + QED_LM_10000baseCR_Full_BIT | + QED_LM_10000baseSR_Full_BIT | + QED_LM_10000baseLR_Full_BIT | + QED_LM_10000baseLRM_Full_BIT; + if (params->adv_speeds & sup_caps) link_params->speed.advertised_speeds |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; - if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT) + if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; + sup_caps = QED_LM_25000baseKR_Full_BIT | + QED_LM_25000baseCR_Full_BIT | + QED_LM_25000baseSR_Full_BIT; + if (params->adv_speeds & sup_caps) link_params->speed.advertised_speeds |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; - if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT) + sup_caps = QED_LM_40000baseLR4_Full_BIT | + QED_LM_40000baseKR4_Full_BIT | + QED_LM_40000baseCR4_Full_BIT | + QED_LM_40000baseSR4_Full_BIT; + if (params->adv_speeds & sup_caps) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; - if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + sup_caps = QED_LM_50000baseKR2_Full_BIT | + QED_LM_50000baseCR2_Full_BIT | + QED_LM_50000baseSR2_Full_BIT; + if (params->adv_speeds & sup_caps) link_params->speed.advertised_speeds |= 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; - if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT) + sup_caps = QED_LM_100000baseKR4_Full_BIT | + QED_LM_100000baseSR4_Full_BIT | + QED_LM_100000baseCR4_Full_BIT | + QED_LM_100000baseLR4_ER4_Full_BIT; + if (params->adv_speeds & sup_caps) link_params->speed.advertised_speeds |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; } @@ -1459,12 +1488,149 @@ static int qed_get_link_data(struct qed_hwfn *hwfn, return 0; } +static void qed_fill_link_capability(struct qed_hwfn *hwfn, + struct qed_ptt *ptt, u32 capability, + u32 *if_capability) +{ + u32 media_type, tcvr_state, tcvr_type; + u32 speed_mask, board_cfg; + + if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) + media_type = MEDIA_UNSPECIFIED; + + if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) + tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; + + if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) + speed_mask = 0xFFFFFFFF; + + if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) + board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; + + DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, + "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", + media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); + + switch (media_type) { + case MEDIA_DA_TWINAX: + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) + *if_capability |= QED_LM_20000baseKR2_Full_BIT; + /* For DAC media multiple speed capabilities are supported*/ + capability = capability & speed_mask; + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + *if_capability |= QED_LM_1000baseKX_Full_BIT; + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + *if_capability |= QED_LM_10000baseCR_Full_BIT; + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + *if_capability |= QED_LM_40000baseCR4_Full_BIT; + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + *if_capability |= QED_LM_25000baseCR_Full_BIT; + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + *if_capability |= QED_LM_50000baseCR2_Full_BIT; + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) + *if_capability |= QED_LM_100000baseCR4_Full_BIT; + break; + case MEDIA_BASE_T: + if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { + *if_capability |= QED_LM_1000baseT_Full_BIT; + } + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { + *if_capability |= QED_LM_10000baseT_Full_BIT; + } + } + if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { + if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET) + *if_capability |= QED_LM_1000baseT_Full_BIT; + if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET) + *if_capability |= QED_LM_10000baseT_Full_BIT; + } + break; + case MEDIA_SFP_1G_FIBER: + case MEDIA_SFPP_10G_FIBER: + case MEDIA_XFP_FIBER: + case MEDIA_MODULE_FIBER: + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { + if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) || + (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX)) + *if_capability |= QED_LM_1000baseKX_Full_BIT; + } + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { + if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR) + *if_capability |= QED_LM_10000baseSR_Full_BIT; + if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR) + *if_capability |= QED_LM_10000baseLR_Full_BIT; + if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM) + *if_capability |= QED_LM_10000baseLRM_Full_BIT; + if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER) + *if_capability 
|= QED_LM_10000baseR_FEC_BIT; + } + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) + *if_capability |= QED_LM_20000baseKR2_Full_BIT; + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { + if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR) + *if_capability |= QED_LM_25000baseSR_Full_BIT; + } + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { + if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4) + *if_capability |= QED_LM_40000baseLR4_Full_BIT; + if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4) + *if_capability |= QED_LM_40000baseSR4_Full_BIT; + } + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + *if_capability |= QED_LM_50000baseKR2_Full_BIT; + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) { + if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4) + *if_capability |= QED_LM_100000baseSR4_Full_BIT; + } + + break; + case MEDIA_KR: + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) + *if_capability |= QED_LM_20000baseKR2_Full_BIT; + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + *if_capability |= QED_LM_1000baseKX_Full_BIT; + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + *if_capability |= QED_LM_10000baseKR_Full_BIT; + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + *if_capability |= QED_LM_25000baseKR_Full_BIT; + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + *if_capability |= QED_LM_40000baseKR4_Full_BIT; + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + *if_capability |= QED_LM_50000baseKR2_Full_BIT; + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) + *if_capability |= QED_LM_100000baseKR4_Full_BIT; + break; + case MEDIA_UNSPECIFIED: + case MEDIA_NOT_PRESENT: + DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, + "Unknown media and transceiver type;\n"); + break; + } +} + static void qed_fill_link(struct qed_hwfn *hwfn, + struct qed_ptt *ptt, struct qed_link_output *if_link) { + struct qed_mcp_link_capabilities link_caps; struct qed_mcp_link_params params; struct qed_mcp_link_state link; - struct qed_mcp_link_capabilities link_caps; u32 media_type; memset(if_link, 0, sizeof(*if_link)); @@ -1495,52 +1661,20 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if_link->advertised_caps |= QED_LM_Autoneg_BIT; else if_link->advertised_caps &= ~QED_LM_Autoneg_BIT; - if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT | - QED_LM_1000baseT_Full_BIT; - if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT; - if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) - if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT; - if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT; - if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT; - if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT; - - if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - if_link->supported_caps |= QED_LM_1000baseT_Half_BIT | - QED_LM_1000baseT_Full_BIT; - if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - 
if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT; - if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) - if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT; - if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT; - if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT; - if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT; + + /* Fill link advertised capability*/ + qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, + &if_link->advertised_caps); + /* Fill link supported capability*/ + qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, + &if_link->supported_caps); if (link.link_up) if_link->speed = link.speed; /* TODO - fill duplex properly */ if_link->duplex = DUPLEX_FULL; - qed_mcp_get_media_type(hwfn->cdev, &media_type); + qed_mcp_get_media_type(hwfn, ptt, &media_type); if_link->port = qed_get_port_type(media_type); if_link->autoneg = params.speed.autoneg; @@ -1553,12 +1687,13 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; /* Link partner capabilities */ - if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD) - if_link->lp_caps |= QED_LM_1000baseT_Half_BIT; - if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD) + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_1G_FD) if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) + if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT; if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) @@ -1596,21 +1731,34 @@ static void qed_fill_link(struct qed_hwfn *hwfn, static void qed_get_current_link(struct qed_dev *cdev, struct qed_link_output *if_link) { + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; int i; - qed_fill_link(&cdev->hwfns[0], if_link); + hwfn = &cdev->hwfns[0]; + if (IS_PF(cdev)) { + ptt = qed_ptt_acquire(hwfn); + if (ptt) { + qed_fill_link(hwfn, ptt, if_link); + qed_ptt_release(hwfn, ptt); + } else { + DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); + } + } else { + qed_fill_link(hwfn, NULL, if_link); + } for_each_hwfn(cdev, i) qed_inform_vf_link_state(&cdev->hwfns[i]); } -void qed_link_update(struct qed_hwfn *hwfn) +void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) { void *cookie = hwfn->cdev->ops_cookie; struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; struct qed_link_output if_link; - qed_fill_link(hwfn, &if_link); + qed_fill_link(hwfn, ptt, &if_link); qed_inform_vf_link_state(hwfn); if (IS_LEAD_HWFN(hwfn) && cookie) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 58c7eb9d8e1b..f40f654398a0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1247,6 +1247,52 @@ static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn, p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV; } +static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct public_func *p_data, int pfid) +{ + u32 addr = 
SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr; + u32 i, size; + + func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + memset(p_data, 0, sizeof(*p_data)); + + size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + return size; +} + +static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, + struct public_func *p_shmem_info) +{ + struct qed_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + + p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config, + FUNC_MF_CFG_MIN_BW); + if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. Set to 1\n", + p_info->bandwidth_min); + p_info->bandwidth_min = 1; + } + + p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config, + FUNC_MF_CFG_MAX_BW); + if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. Set to 100\n", + p_info->bandwidth_max); + p_info->bandwidth_max = 100; + } +} + static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_reset) { @@ -1274,10 +1320,29 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, goto out; } - if (p_hwfn->b_drv_link_init) - p_link->link_up = !!(status & LINK_STATUS_LINK_UP); - else + if (p_hwfn->b_drv_link_init) { + /* Link indication with modern MFW arrives as per-PF + * indication. + */ + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { + struct public_func shmem_info; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + p_link->link_up = !!(shmem_info.status & + FUNC_STATUS_VIRTUAL_LINK_UP); + qed_read_pf_bandwidth(p_hwfn, &shmem_info); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Virtual link_up = %d\n", p_link->link_up); + } else { + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Physical link_up = %d\n", p_link->link_up); + } + } else { p_link->link_up = false; + } p_link->full_duplex = true; switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { @@ -1382,7 +1447,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); - qed_link_update(p_hwfn); + qed_link_update(p_hwfn, p_ptt); out: spin_unlock_bh(&p_hwfn->mcp_info->link_lock); } @@ -1504,53 +1569,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } -static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, - struct public_func *p_shmem_info) -{ - struct qed_mcp_function_info *p_info; - - p_info = &p_hwfn->mcp_info->func_info; - - p_info->bandwidth_min = (p_shmem_info->config & - FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT; - if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { - DP_INFO(p_hwfn, - "bandwidth minimum out of bounds [%02x]. Set to 1\n", - p_info->bandwidth_min); - p_info->bandwidth_min = 1; - } - - p_info->bandwidth_max = (p_shmem_info->config & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { - DP_INFO(p_hwfn, - "bandwidth maximum out of bounds [%02x]. 
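/*
 * qed_read_pf_bandwidth() above pulls the min/max bandwidth fields out of the
 * shmem "config" word with a mask/shift helper and clamps them to 1..100. A
 * standalone model of that extraction and clamping follows; the GET_FIELD
 * macro, the mask/shift values and the sample config word are illustrative
 * only and are not taken from the driver headers.
 */
#include <stdio.h>

#define FUNC_MF_CFG_MIN_BW_MASK   0x0000ff00
#define FUNC_MF_CFG_MIN_BW_SHIFT  8
#define GET_FIELD(val, name)      (((val) & name##_MASK) >> name##_SHIFT)

int main(void)
{
	unsigned int config = 0x00640a00;	/* sample shmem config word */
	unsigned int bw_min = GET_FIELD(config, FUNC_MF_CFG_MIN_BW);

	if (bw_min < 1 || bw_min > 100) {
		printf("bandwidth minimum out of bounds [%02x]. Set to 1\n",
		       bw_min);
		bw_min = 1;
	}
	printf("bw_min = %u%%\n", bw_min);	/* prints 10 for the sample word */
	return 0;
}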
Set to 100\n", - p_info->bandwidth_max); - p_info->bandwidth_max = 100; - } -} - -static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct public_func *p_data, int pfid) -{ - u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, - PUBLIC_FUNC); - u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); - u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); - u32 i, size; - - memset(p_data, 0, sizeof(*p_data)); - - size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); - for (i = 0; i < size / sizeof(u32); i++) - ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, - func_addr + (i << 2)); - return size; -} - static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_function_info *p_info; @@ -1849,12 +1867,12 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, return 0; } -int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) +int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_media_type) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; - struct qed_ptt *p_ptt; + *p_media_type = MEDIA_UNSPECIFIED; - if (IS_VF(cdev)) + if (IS_VF(p_hwfn->cdev)) return -EINVAL; if (!qed_mcp_is_init(p_hwfn)) { @@ -1862,16 +1880,195 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) return -EBUSY; } - *p_media_type = MEDIA_UNSPECIFIED; + if (!p_ptt) { + *p_media_type = MEDIA_UNSPECIFIED; + return -EINVAL; + } - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) + *p_media_type = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + media_type)); + + return 0; +} + +int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_transceiver_state, + u32 *p_transceiver_type) +{ + u32 transceiver_info; + + *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE; + *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; + } - *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, media_type)); + transceiver_info = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); - qed_ptt_release(p_hwfn, p_ptt); + *p_transceiver_state = (transceiver_info & + ETH_TRANSCEIVER_STATE_MASK) >> + ETH_TRANSCEIVER_STATE_OFFSET; + + if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) + *p_transceiver_type = (transceiver_info & + ETH_TRANSCEIVER_TYPE_MASK) >> + ETH_TRANSCEIVER_TYPE_OFFSET; + else + *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN; + + return 0; +} +static bool qed_is_transceiver_ready(u32 transceiver_state, + u32 transceiver_type) +{ + if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && + ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && + (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) + return true; + + return false; +} + +int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_speed_mask) +{ + u32 transceiver_type, transceiver_state; + + qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, + &transceiver_type); + + if (qed_is_transceiver_ready(transceiver_state, transceiver_type) == + false) + return -EINVAL; + + switch (transceiver_type) { + case ETH_TRANSCEIVER_TYPE_1G_LX: + case ETH_TRANSCEIVER_TYPE_1G_SX: + case ETH_TRANSCEIVER_TYPE_1G_PCC: + case ETH_TRANSCEIVER_TYPE_1G_ACC: + case 
ETH_TRANSCEIVER_TYPE_1000BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + case ETH_TRANSCEIVER_TYPE_10G_SR: + case ETH_TRANSCEIVER_TYPE_10G_LR: + case ETH_TRANSCEIVER_TYPE_10G_LRM: + case ETH_TRANSCEIVER_TYPE_10G_ER: + case ETH_TRANSCEIVER_TYPE_10G_PCC: + case ETH_TRANSCEIVER_TYPE_10G_ACC: + case ETH_TRANSCEIVER_TYPE_4x10G: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + case ETH_TRANSCEIVER_TYPE_40G_LR4: + case ETH_TRANSCEIVER_TYPE_40G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + case ETH_TRANSCEIVER_TYPE_100G_AOC: + case ETH_TRANSCEIVER_TYPE_100G_SR4: + case ETH_TRANSCEIVER_TYPE_100G_LR4: + case ETH_TRANSCEIVER_TYPE_100G_ER4: + case ETH_TRANSCEIVER_TYPE_100G_ACC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + case ETH_TRANSCEIVER_TYPE_25G_SR: + case ETH_TRANSCEIVER_TYPE_25G_LR: + case ETH_TRANSCEIVER_TYPE_25G_AOC: + case ETH_TRANSCEIVER_TYPE_25G_ACC_S: + case ETH_TRANSCEIVER_TYPE_25G_ACC_M: + case ETH_TRANSCEIVER_TYPE_25G_ACC_L: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + case ETH_TRANSCEIVER_TYPE_25G_CA_N: + case ETH_TRANSCEIVER_TYPE_25G_CA_S: + case ETH_TRANSCEIVER_TYPE_25G_CA_L: + case ETH_TRANSCEIVER_TYPE_4x25G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + case ETH_TRANSCEIVER_TYPE_40G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + case ETH_TRANSCEIVER_TYPE_100G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + case ETH_TRANSCEIVER_TYPE_XLPPI: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + break; + case ETH_TRANSCEIVER_TYPE_10G_BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + default: + DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n", + transceiver_type); + *p_speed_mask = 0xff; + break; + } + + return 0; +} + +int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_board_config) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); + return -EBUSY; + } + if (!p_ptt) { + *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; + return -EINVAL; + 
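/*
 * qed_mcp_trans_speed_mask() above reduces the plugged transceiver type to a
 * bitmask of NVM speed capabilities, and qed_fill_link_capability() then ANDs
 * that mask with the port's own capability word before reporting ethtool link
 * modes. A standalone model of that intersection for a 25G DAC cable follows;
 * the SPEED_CAP_* names are local to the sketch (the values mirror the
 * NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_* encoding shown in the patch) and
 * the capability word is a made-up sample.
 */
#include <stdio.h>

#define SPEED_CAP_1G	0x1
#define SPEED_CAP_10G	0x2
#define SPEED_CAP_25G	0x8
#define SPEED_CAP_40G	0x10

int main(void)
{
	/* Port claims 1G/10G/25G/40G ... */
	unsigned int capability = SPEED_CAP_1G | SPEED_CAP_10G |
				  SPEED_CAP_25G | SPEED_CAP_40G;
	/* ... but a 25G_CA_N module only yields a 1G/10G/25G mask */
	unsigned int speed_mask = SPEED_CAP_1G | SPEED_CAP_10G | SPEED_CAP_25G;

	capability &= speed_mask;
	printf("advertise 40G? %s\n",
	       (capability & SPEED_CAP_40G) ? "yes" : "no");	/* no */
	return 0;
}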
} + + nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + *p_board_config = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + board_cfg)); return 0; } @@ -3351,7 +3548,8 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp, mcp_param, features; - features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE; + features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | + DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 85e6b3989e7a..1adfe52b3905 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -322,14 +322,61 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, * @brief Get media type value of the port. * * @param cdev - qed dev pointer + * @param p_ptt * @param mfw_ver - media type value * * @return int - * 0 - Operation was successul. * -EBUSY - Operation failed */ -int qed_mcp_get_media_type(struct qed_dev *cdev, - u32 *media_type); +int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *media_type); + +/** + * @brief Get transceiver data of the port. + * + * @param cdev - qed dev pointer + * @param p_ptt + * @param p_transceiver_state - transceiver state. + * @param p_transceiver_type - media type value + * + * @return int - + * 0 - Operation was successful. + * -EBUSY - Operation failed + */ +int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_transceiver_state, + u32 *p_tranceiver_type); + +/** + * @brief Get transceiver supported speed mask. + * + * @param cdev - qed dev pointer + * @param p_ptt + * @param p_speed_mask - Bit mask of all supported speeds. + * + * @return int - + * 0 - Operation was successful. + * -EBUSY - Operation failed + */ + +int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_speed_mask); + +/** + * @brief Get board configuration. + * + * @param cdev - qed dev pointer + * @param p_ptt + * @param p_board_config - Board config. + * + * @return int - + * 0 - Operation was successful. 
+ * -EBUSY - Operation failed + */ +int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_board_config); /** * @brief General function for sending commands to the MCP diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index 6172354b451c..ffac4ac87394 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -211,9 +211,8 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, if (!p_buffer) break; - list_del(&p_buffer->list_entry); - list_add_tail(&p_buffer->list_entry, - &p_ooo_info->free_buffers_list); + list_move_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); } list_add_tail(&p_isle->list_entry, &p_ooo_info->free_isles_list); @@ -247,9 +246,8 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, if (!p_buffer) break; - list_del(&p_buffer->list_entry); - list_add_tail(&p_buffer->list_entry, - &p_ooo_info->free_buffers_list); + list_move_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); } list_add_tail(&p_isle->list_entry, &p_ooo_info->free_isles_list); @@ -353,11 +351,9 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid, u8 drop_isle, u8 drop_size) { - struct qed_ooo_archipelago *p_archipelago = NULL; struct qed_ooo_isle *p_isle = NULL; u8 isle_idx; - p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); for (isle_idx = 0; isle_idx < drop_size; isle_idx++) { p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle); if (!p_isle) { @@ -462,7 +458,6 @@ void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle) { - struct qed_ooo_archipelago *p_archipelago = NULL; struct qed_ooo_isle *p_right_isle = NULL; struct qed_ooo_isle *p_left_isle = NULL; @@ -475,7 +470,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, return; } - p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); list_del(&p_right_isle->list_entry); p_ooo_info->cur_isles_number--; if (left_isle) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 1673fc90027f..c4a6274dd625 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -730,8 +730,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, !list_empty(head)) { struct qed_spq_entry *p_ent = list_first_entry(head, struct qed_spq_entry, list); - list_del(&p_ent->list); - list_add_tail(&p_ent->list, &p_spq->completion_pending); + list_move_tail(&p_ent->list, &p_spq->completion_pending); p_spq->comp_sent_count++; rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index be118d057b92..b6cccf44bf40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1688,7 +1688,7 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) ops->ports_update(cookie, vxlan_port, geneve_port); /* Always update link configuration according to bulletin */ - qed_link_update(hwfn); + qed_link_update(hwfn, NULL); } void qed_iov_vf_task(struct work_struct *work) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 6a4d266fb8e2..de98a974673b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -440,7 +440,7 @@ struct 
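/*
 * Several hunks above (qed_iwarp.c, qed_ooo.c, qed_spq.c) replace the
 * open-coded pair list_del() + list_add_tail() with list_move_tail(), which
 * performs the same two steps internally. A minimal sketch of the equivalence
 * using <linux/list.h>; the helper name and list arguments are illustrative.
 */
#include <linux/list.h>

static inline void move_to_free_list(struct list_head *entry,
				     struct list_head *free_list)
{
	/* Before: two calls
	 *	list_del(entry);
	 *	list_add_tail(entry, free_list);
	 * After: one call with identical semantics
	 */
	list_move_tail(entry, free_list);
}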
qede_fastpath { struct qede_tx_queue *txq; struct qede_tx_queue *xdp_tx; -#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) +#define VEC_NAME_SIZE (FIELD_SIZEOF(struct net_device, name) + 8) char name[VEC_NAME_SIZE]; }; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 19652cd27ca7..8cbbd628fd73 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -413,18 +413,42 @@ struct qede_link_mode_mapping { }; static const struct qede_link_mode_mapping qed_lm_map[] = { - {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT}, {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, - {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT}, {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, + {QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT}, + {QED_LM_2500baseX_Full_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT}, + {QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, + {QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, + {QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT}, {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, - {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, + {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, + {QED_LM_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT}, + {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT}, + {QED_LM_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, + {QED_LM_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, + {QED_LM_40000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, + {QED_LM_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, + {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, + {QED_LM_25000baseSR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, + {QED_LM_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, {QED_LM_100000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, + {QED_LM_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, + {QED_LM_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, + {QED_LM_100000baseLR4_ER4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, + {QED_LM_50000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, + {QED_LM_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT}, + {QED_LM_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, + {QED_LM_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, + {QED_LM_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, + {QED_LM_10000baseLRM_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT}, }; #define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \ @@ -494,6 +518,7 @@ static int qede_set_link_ksettings(struct net_device *dev, struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; struct qed_link_params params; + u32 sup_caps; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, "Link settings are not allowed to be changed\n"); @@ -520,52 
+545,85 @@ static int qede_set_link_ksettings(struct net_device *dev, params.forced_speed = base->speed; switch (base->speed) { case SPEED_1000: - if (!(current_link.supported_caps & - QED_LM_1000baseT_Full_BIT)) { + sup_caps = QED_LM_1000baseT_Full_BIT | + QED_LM_1000baseKX_Full_BIT | + QED_LM_1000baseX_Full_BIT; + if (!(current_link.supported_caps & sup_caps)) { DP_INFO(edev, "1G speed not supported\n"); return -EINVAL; } - params.adv_speeds = QED_LM_1000baseT_Full_BIT; + params.adv_speeds = current_link.supported_caps & + sup_caps; break; case SPEED_10000: - if (!(current_link.supported_caps & - QED_LM_10000baseKR_Full_BIT)) { + sup_caps = QED_LM_10000baseT_Full_BIT | + QED_LM_10000baseKR_Full_BIT | + QED_LM_10000baseKX4_Full_BIT | + QED_LM_10000baseR_FEC_BIT | + QED_LM_10000baseCR_Full_BIT | + QED_LM_10000baseSR_Full_BIT | + QED_LM_10000baseLR_Full_BIT | + QED_LM_10000baseLRM_Full_BIT; + if (!(current_link.supported_caps & sup_caps)) { DP_INFO(edev, "10G speed not supported\n"); return -EINVAL; } - params.adv_speeds = QED_LM_10000baseKR_Full_BIT; + params.adv_speeds = current_link.supported_caps & + sup_caps; break; - case SPEED_25000: + case SPEED_20000: if (!(current_link.supported_caps & - QED_LM_25000baseKR_Full_BIT)) { + QED_LM_20000baseKR2_Full_BIT)) { + DP_INFO(edev, "20G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_20000baseKR2_Full_BIT; + break; + case SPEED_25000: + sup_caps = QED_LM_25000baseKR_Full_BIT | + QED_LM_25000baseCR_Full_BIT | + QED_LM_25000baseSR_Full_BIT; + if (!(current_link.supported_caps & sup_caps)) { DP_INFO(edev, "25G speed not supported\n"); return -EINVAL; } - params.adv_speeds = QED_LM_25000baseKR_Full_BIT; + params.adv_speeds = current_link.supported_caps & + sup_caps; break; case SPEED_40000: - if (!(current_link.supported_caps & - QED_LM_40000baseLR4_Full_BIT)) { + sup_caps = QED_LM_40000baseLR4_Full_BIT | + QED_LM_40000baseKR4_Full_BIT | + QED_LM_40000baseCR4_Full_BIT | + QED_LM_40000baseSR4_Full_BIT; + if (!(current_link.supported_caps & sup_caps)) { DP_INFO(edev, "40G speed not supported\n"); return -EINVAL; } - params.adv_speeds = QED_LM_40000baseLR4_Full_BIT; + params.adv_speeds = current_link.supported_caps & + sup_caps; break; case SPEED_50000: - if (!(current_link.supported_caps & - QED_LM_50000baseKR2_Full_BIT)) { + sup_caps = QED_LM_50000baseKR2_Full_BIT | + QED_LM_50000baseCR2_Full_BIT | + QED_LM_50000baseSR2_Full_BIT; + if (!(current_link.supported_caps & sup_caps)) { DP_INFO(edev, "50G speed not supported\n"); return -EINVAL; } - params.adv_speeds = QED_LM_50000baseKR2_Full_BIT; + params.adv_speeds = current_link.supported_caps & + sup_caps; break; case SPEED_100000: - if (!(current_link.supported_caps & - QED_LM_100000baseKR4_Full_BIT)) { + sup_caps = QED_LM_100000baseKR4_Full_BIT | + QED_LM_100000baseSR4_Full_BIT | + QED_LM_100000baseCR4_Full_BIT | + QED_LM_100000baseLR4_ER4_Full_BIT; + if (!(current_link.supported_caps & sup_caps)) { DP_INFO(edev, "100G speed not supported\n"); return -EINVAL; } - params.adv_speeds = QED_LM_100000baseKR4_Full_BIT; + params.adv_speeds = current_link.supported_caps & + sup_caps; break; default: DP_INFO(edev, "Unsupported speed %u\n", base->speed); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index b48f76182049..10b075bc5959 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev) qdev->eeprom_cmd_data = 
AUBURN_EEPROM_CS_1; ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); - ql_write_nvram_reg(qdev, spir, - ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); } /* diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index a79d84f99102..2a533280b124 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -4233,7 +4233,6 @@ static void qlcnic_83xx_io_resume(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - pci_cleanup_aer_uncorrect_error_status(pdev); if (test_and_clear_bit(__QLCNIC_AER, &adapter->state)) qlcnic_83xx_aer_start_poll_work(adapter); } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index 4b76c69fe86d..834208e55f7b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid, struct qlcnic_adapter *adapter = netdev_priv(netdev); if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) - return 0; + return 1; switch (capid) { case DCB_CAP_ATTR_PG: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index dbd48012224f..d42ba2293d8c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3930,7 +3930,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *pdev) u32 state; struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - pci_cleanup_aer_uncorrect_error_status(pdev); state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER, &adapter->state)) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 77e386ebff09..f7c2f32237cb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -904,13 +904,11 @@ static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u32 size) { struct qlcnic_hardware_context *ahw = adapter->ahw; - u32 fw_mbx; u8 i, max = 2, hdr_size, j; hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); max = (size / sizeof(u32)) + hdr_size; - fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0)); for (i = 2, j = 0; j < hdr_size; i++, j++) *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i)); for (; j < max; i++, j++) @@ -936,7 +934,7 @@ static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf) static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) { struct qlcnic_vf_info *vf = trans->vf; - u32 pay_size, hdr_size; + u32 pay_size; u32 *hdr, *pay; int ret; u8 pci_func = trans->func_id; @@ -947,14 +945,12 @@ static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) if (type == QLC_BC_COMMAND) { hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag); pay = (u32 *)(trans->req_pay + trans->curr_req_frag); - hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, trans->curr_req_frag); pay_size = (pay_size / sizeof(u32)); } else { hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag); pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag); - hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); pay_size = 
qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, trans->curr_rsp_frag); pay_size = (pay_size / sizeof(u32)); diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c index 6c8543fb90c0..4292c89bd35c 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k.c +++ b/drivers/net/ethernet/qualcomm/qca_7k.c @@ -81,8 +81,8 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result) return ret; } -int -qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) +static int +__qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) { __be16 tx_data[2]; struct spi_transfer transfer[2]; @@ -117,3 +117,33 @@ qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) return ret; } + +int +qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value, int retry) +{ + int ret, i = 0; + u16 confirmed; + + do { + ret = __qcaspi_write_register(qca, reg, value); + if (ret) + return ret; + + if (!retry) + return 0; + + ret = qcaspi_read_register(qca, reg, &confirmed); + if (ret) + return ret; + + ret = confirmed != value; + if (!ret) + return 0; + + i++; + qca->stats.write_verify_failed++; + + } while (i <= retry); + + return ret; +} diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h index 27124c2bb77a..356de8ec5d48 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k.h +++ b/drivers/net/ethernet/qualcomm/qca_7k.h @@ -66,6 +66,6 @@ void qcaspi_spi_error(struct qcaspi *qca); int qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result); -int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value); +int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value, int retry); #endif /* _QCA_7K_H */ diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index 51d89c86e60f..a9f1bc013364 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -60,6 +60,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = { "Write buffer misses", "Transmit ring full", "SPI errors", + "Write verify errors", }; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 66b775d462fd..d5310504f436 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -69,6 +69,12 @@ static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN; module_param(qcaspi_pluggable, int, 0); MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no)."); +#define QCASPI_WRITE_VERIFY_MIN 0 +#define QCASPI_WRITE_VERIFY_MAX 3 +static int wr_verify = QCASPI_WRITE_VERIFY_MIN; +module_param(wr_verify, int, 0); +MODULE_PARM_DESC(wr_verify, "SPI register write verify trails. 
Use 0-3."); + #define QCASPI_TX_TIMEOUT (1 * HZ) #define QCASPI_QCA7K_REBOOT_TIME_MS 1000 @@ -77,7 +83,7 @@ start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause) { *intr_cause = 0; - qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0); + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify); qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause); netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause); } @@ -90,8 +96,8 @@ end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause) SPI_INT_RDBUF_ERR | SPI_INT_WRBUF_ERR); - qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause); - qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable); + qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause, 0); + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable, wr_verify); netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause); } @@ -239,7 +245,7 @@ qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb) len = skb->len; - qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len); + qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len, wr_verify); if (qca->legacy_mode) qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); @@ -345,6 +351,7 @@ qcaspi_receive(struct qcaspi *qca) /* Read the packet size. */ qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available); + netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n", available); @@ -353,7 +360,7 @@ qcaspi_receive(struct qcaspi *qca) return -1; } - qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available); + qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available, wr_verify); if (qca->legacy_mode) qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); @@ -524,7 +531,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event) netdev_dbg(qca->net_dev, "sync: resetting device.\n"); qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config); spi_config |= QCASPI_SLAVE_RESET_BIT; - qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config); + qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config, 0); qca->sync = QCASPI_SYNC_RESET; qca->stats.trig_reset++; @@ -684,7 +691,7 @@ qcaspi_netdev_close(struct net_device *dev) netif_stop_queue(dev); - qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0); + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify); free_irq(qca->spi_dev->irq, qca); kthread_stop(qca->spi_thread); @@ -904,6 +911,13 @@ qca_spi_probe(struct spi_device *spi) return -EINVAL; } + if (wr_verify < QCASPI_WRITE_VERIFY_MIN || + wr_verify > QCASPI_WRITE_VERIFY_MAX) { + dev_err(&spi->dev, "Invalid write verify: %d\n", + wr_verify); + return -EINVAL; + } + dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n", QCASPI_DRV_VERSION, qcaspi_clkspeed, diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index fc0e98726b36..2d2c49726492 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -73,6 +73,7 @@ struct qcaspi_stats { u64 write_buf_miss; u64 ring_full; u64 spi_err; + u64 write_verify_failed; }; struct qcaspi { diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index aa11b70b9ca4..04aa592f35c3 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1024,16 +1024,8 @@ static int r6040_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } - /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | 
SUPPORTED_Autoneg - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); + lp->old_link = 0; lp->old_duplex = -1; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 9a5e2969df61..006b0aa8cec3 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -77,8 +77,6 @@ static const int multicast_filter_limit = 32; #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) -#define RTL8169_TX_TIMEOUT (6*HZ) - /* write/read MMIO register */ #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) #define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) @@ -633,7 +631,6 @@ struct rtl8169_tc_offsets { enum rtl_flag { RTL_FLAG_TASK_ENABLED = 0, - RTL_FLAG_TASK_SLOW_PENDING, RTL_FLAG_TASK_RESET_PENDING, RTL_FLAG_MAX }; @@ -1354,7 +1351,8 @@ static void rtl_irq_enable_all(struct rtl8169_private *tp) static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { rtl_irq_disable(tp); - rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); + rtl_ack_events(tp, 0xffff); + /* PCI commit */ RTL_R8(tp, ChipCmd); } @@ -4048,23 +4046,11 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) rtl_hw_phy_config(dev); if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - netif_dbg(tp, drv, dev, - "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(tp, 0x82, 0x01); - } - - pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); - - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); - - if (tp->mac_version == RTL_GIGA_MAC_VER_02) { netif_dbg(tp, drv, dev, "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); - netif_dbg(tp, drv, dev, - "Set PHY Reg 0x0bh = 0x00h\n"); - rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } /* We may have called phy_speed_down before */ @@ -4282,8 +4268,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: @@ -4566,27 +4552,19 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) { - static const struct rtl_cfg2_info { - u32 mac_version; - u32 clk; - u32 val; - } cfg2_info [] = { - { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd - { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, - { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe - { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } - }; - const struct rtl_cfg2_info *p = cfg2_info; - unsigned int i; - u32 clk; + u32 val; - clk = RTL_R8(tp, Config2) & PCI_Clock_66MHz; - for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { - if ((p->mac_version == mac_version) && (p->clk == clk)) { - RTL_W32(tp, 0x7c, p->val); - break; - } - } + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + val = 0x000fff00; + else if (tp->mac_version == RTL_GIGA_MAC_VER_06) + val = 0x00ffff00; + else + return; + + if (RTL_R8(tp, Config2) & PCI_Clock_66MHz) + val |= 0xff; + + RTL_W32(tp, 0x7c, val); } static void rtl_set_rx_mode(struct net_device *dev) @@ -5873,6 +5851,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) { rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); tp->cur_tx = tp->dirty_tx = 0; + netdev_reset_queue(tp->dev); } static void rtl_reset_work(struct rtl8169_private *tp) @@ -6175,6 +6154,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ @@ -6273,7 +6254,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) { - unsigned int dirty_tx, tx_left; + unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0; dirty_tx = tp->dirty_tx; smp_rmb(); @@ -6297,10 +6278,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb, tp->TxDescArray + entry); if (status & LastFrag) { - u64_stats_update_begin(&tp->tx_stats.syncp); - tp->tx_stats.packets++; - tp->tx_stats.bytes += tx_skb->skb->len; - u64_stats_update_end(&tp->tx_stats.syncp); + pkts_compl++; + bytes_compl += tx_skb->skb->len; dev_consume_skb_any(tx_skb->skb); tx_skb->skb = NULL; } @@ -6309,6 +6288,13 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) } if (tp->dirty_tx != dirty_tx) { + netdev_completed_queue(dev, pkts_compl, bytes_compl); + + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets += pkts_compl; + tp->tx_stats.bytes += bytes_compl; + u64_stats_update_end(&tp->tx_stats.syncp); + tp->dirty_tx = dirty_tx; /* Sync with rtl8169_start_xmit: * - publish dirty_tx ring index (write barrier) @@ -6473,42 +6459,29 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow))) return IRQ_NONE; - rtl_irq_disable(tp); - napi_schedule_irqoff(&tp->napi); - - return IRQ_HANDLED; -} - -/* - * Workqueue context. 
- */ -static void rtl_slow_event_work(struct rtl8169_private *tp) -{ - struct net_device *dev = tp->dev; - u16 status; + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(tp->dev); + goto out; + } - status = rtl_get_events(tp) & tp->event_slow; - rtl_ack_events(tp, status); + if (status & LinkChg) + phy_mac_interrupt(tp->dev->phydev); - if (unlikely(status & RxFIFOOver)) { - switch (tp->mac_version) { - /* Work around for rx fifo overflow */ - case RTL_GIGA_MAC_VER_11: - netif_stop_queue(dev); - /* XXX - Hack alert. See rtl_task(). */ - set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); - default: - break; - } + if (unlikely(status & RxFIFOOver && + tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(tp->dev); + /* XXX - Hack alert. See rtl_task(). */ + set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); } - if (unlikely(status & SYSErr)) - rtl8169_pcierr_interrupt(dev); - - if (status & LinkChg) - phy_mac_interrupt(dev->phydev); + if (status & RTL_EVENT_NAPI) { + rtl_irq_disable(tp); + napi_schedule_irqoff(&tp->napi); + } +out: + rtl_ack_events(tp, status); - rtl_irq_enable_all(tp); + return IRQ_HANDLED; } static void rtl_task(struct work_struct *work) @@ -6517,8 +6490,6 @@ static void rtl_task(struct work_struct *work) int bitnr; void (*action)(struct rtl8169_private *); } rtl_work[] = { - /* XXX - keep rtl_slow_event_work() as first element. */ - { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, }; struct rtl8169_private *tp = @@ -6548,29 +6519,16 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) { struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); struct net_device *dev = tp->dev; - u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; - int work_done= 0; - u16 status; + int work_done; - status = rtl_get_events(tp); - rtl_ack_events(tp, status & ~tp->event_slow); + work_done = rtl_rx(dev, tp, (u32) budget); - if (status & RTL_EVENT_NAPI_RX) - work_done = rtl_rx(dev, tp, (u32) budget); - - if (status & RTL_EVENT_NAPI_TX) - rtl_tx(dev, tp); - - if (status & tp->event_slow) { - enable_mask &= ~tp->event_slow; - - rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING); - } + rtl_tx(dev, tp); if (work_done < budget) { napi_complete_done(napi, work_done); - rtl_irq_enable(tp, enable_mask); + rtl_irq_enable_all(tp); mmiowb(); } @@ -6846,7 +6804,6 @@ static void rtl8169_net_suspend(struct net_device *dev) phy_stop(dev->phydev); netif_device_detach(dev); - netif_stop_queue(dev); rtl_lock_work(tp); napi_disable(&tp->napi); @@ -7093,20 +7050,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) { unsigned int flags; - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { RTL_W8(tp, Cfg9346, Cfg9346_Unlock); RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); RTL_W8(tp, Cfg9346, Cfg9346_Lock); flags = PCI_IRQ_LEGACY; - break; - case RTL_GIGA_MAC_VER_39 ... 
RTL_GIGA_MAC_VER_40: - /* This version was reported to have issues with resume - * from suspend when using MSI-X - */ - flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI; - break; - default: + } else { flags = PCI_IRQ_ALL_TYPES; } @@ -7368,11 +7317,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->cp_cmd = RTL_R16(tp, CPlusCmd); - if ((sizeof(dma_addr_t) > 4) && - (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && - tp->mac_version >= RTL_GIGA_MAC_VER_18)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (sizeof(dma_addr_t) > 4 && (use_dac == 1 || (use_dac == -1 && + tp->mac_version >= RTL_GIGA_MAC_VER_18)) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ if (!pci_is_pcie(pdev)) @@ -7388,14 +7335,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_init_rxcfg(tp); - rtl_irq_disable(tp); + rtl8169_irq_mask_and_ack(tp); rtl_hw_initialize(tp); rtl_hw_reset(tp); - rtl_ack_events(tp, 0xffff); - pci_set_master(pdev); rtl_init_mdio_ops(tp); @@ -7435,7 +7380,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); dev->ethtool_ops = &rtl8169_ethtool_ops; - dev->watchdog_timeo = RTL8169_TX_TIMEOUT; netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 9b6bf557a2f5..1c6e4df94f01 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -959,7 +959,10 @@ enum RAVB_QUEUE { #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 #define NUM_TX_QUEUE 2 -#define NUM_TX_DESC 2 /* TX descriptors per packet */ + +/* TX descriptors per packet */ +#define NUM_TX_DESC_GEN2 2 +#define NUM_TX_DESC_GEN3 1 struct ravb_tstamp_skb { struct list_head list; @@ -1038,6 +1041,7 @@ struct ravb_private { unsigned no_avb_link:1; unsigned avb_link_active_low:1; unsigned wol_enabled:1; + int num_tx_desc; /* TX descriptors per packet */ }; static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index d6f753925352..defed0d0c51d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -182,6 +182,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) { struct ravb_private *priv = netdev_priv(ndev); struct net_device_stats *stats = &priv->stats[q]; + int num_tx_desc = priv->num_tx_desc; struct ravb_tx_desc *desc; int free_num = 0; int entry; @@ -191,7 +192,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) bool txed; entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * - NUM_TX_DESC); + num_tx_desc); desc = &priv->tx_ring[q][entry]; txed = desc->die_dt == DT_FEMPTY; if (free_txed_only && !txed) @@ -200,12 +201,12 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) dma_rmb(); size = le16_to_cpu(desc->ds_tagl) & TX_DS; /* Free the original skb. */ - if (priv->tx_skb[q][entry / NUM_TX_DESC]) { + if (priv->tx_skb[q][entry / num_tx_desc]) { dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), size, DMA_TO_DEVICE); /* Last packet descriptor? 
*/ - if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { - entry /= NUM_TX_DESC; + if (entry % num_tx_desc == num_tx_desc - 1) { + entry /= num_tx_desc; dev_kfree_skb_any(priv->tx_skb[q][entry]); priv->tx_skb[q][entry] = NULL; if (txed) @@ -224,6 +225,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) static void ravb_ring_free(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; int ring_size; int i; @@ -249,7 +251,7 @@ static void ravb_ring_free(struct net_device *ndev, int q) ravb_tx_free(ndev, q, false); ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], priv->tx_desc_dma[q]); priv->tx_ring[q] = NULL; @@ -278,12 +280,13 @@ static void ravb_ring_free(struct net_device *ndev, int q) static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct ravb_ex_rx_desc *rx_desc; struct ravb_tx_desc *tx_desc; struct ravb_desc *desc; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * - NUM_TX_DESC; + num_tx_desc; dma_addr_t dma_addr; int i; @@ -318,8 +321,10 @@ static void ravb_ring_format(struct net_device *ndev, int q) for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; i++, tx_desc++) { tx_desc->die_dt = DT_EEMPTY; - tx_desc++; - tx_desc->die_dt = DT_EEMPTY; + if (num_tx_desc > 1) { + tx_desc++; + tx_desc->die_dt = DT_EEMPTY; + } } tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); tx_desc->die_dt = DT_LINKFIX; /* type */ @@ -339,6 +344,7 @@ static void ravb_ring_format(struct net_device *ndev, int q) static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct sk_buff *skb; int ring_size; int i; @@ -362,11 +368,13 @@ static int ravb_ring_init(struct net_device *ndev, int q) priv->rx_skb[q][i] = skb; } - /* Allocate rings for the aligned buffers */ - priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + - DPTR_ALIGN - 1, GFP_KERNEL); - if (!priv->tx_align[q]) - goto error; + if (num_tx_desc > 1) { + /* Allocate rings for the aligned buffers */ + priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + + DPTR_ALIGN - 1, GFP_KERNEL); + if (!priv->tx_align[q]) + goto error; + } /* Allocate all RX descriptors. */ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); @@ -380,7 +388,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) /* Allocate all TX descriptors. 
*/ ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, &priv->tx_desc_dma[q], GFP_KERNEL); @@ -1074,8 +1082,11 @@ static int ravb_phy_init(struct net_device *ndev) netdev_info(ndev, "limited PHY to 100Mbit/s\n"); } - /* 10BASE is not supported */ - phydev->supported &= ~PHY_10BT_FEATURES; + /* 10BASE, Pause and Asym Pause is not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); phy_attached_info(phydev); @@ -1485,6 +1496,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; u16 q = skb_get_queue_mapping(skb); struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; @@ -1496,7 +1508,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * - NUM_TX_DESC) { + num_tx_desc) { netif_err(priv, tx_queued, ndev, "still transmitting with the full ring!\n"); netif_stop_subqueue(ndev, q); @@ -1507,41 +1519,55 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (skb_put_padto(skb, ETH_ZLEN)) goto exit; - entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); - priv->tx_skb[q][entry / NUM_TX_DESC] = skb; - - buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + - entry / NUM_TX_DESC * DPTR_ALIGN; - len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; - /* Zero length DMA descriptors are problematic as they seem to - * terminate DMA transfers. Avoid them by simply using a length of - * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN. - * - * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of - * data by the call to skb_put_padto() above this is safe with - * respect to both the length of the first DMA descriptor (len) - * overflowing the available data and the length of the second DMA - * descriptor (skb->len - len) being negative. - */ - if (len == 0) - len = DPTR_ALIGN; + entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); + priv->tx_skb[q][entry / num_tx_desc] = skb; + + if (num_tx_desc > 1) { + buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + + entry / num_tx_desc * DPTR_ALIGN; + len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + + /* Zero length DMA descriptors are problematic as they seem + * to terminate DMA transfers. Avoid them by simply using a + * length of DPTR_ALIGN (4) when skb data is aligned to + * DPTR_ALIGN. + * + * As skb is guaranteed to have at least ETH_ZLEN (60) + * bytes of data by the call to skb_put_padto() above this + * is safe with respect to both the length of the first DMA + * descriptor (len) overflowing the available data and the + * length of the second DMA descriptor (skb->len - len) + * being negative. 
+ */ + if (len == 0) + len = DPTR_ALIGN; - memcpy(buffer, skb->data, len); - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto drop; + memcpy(buffer, skb->data, len); + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; - desc = &priv->tx_ring[q][entry]; - desc->ds_tagl = cpu_to_le16(len); - desc->dptr = cpu_to_le32(dma_addr); + desc = &priv->tx_ring[q][entry]; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr); - buffer = skb->data + len; - len = skb->len - len; - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto unmap; + buffer = skb->data + len; + len = skb->len - len; + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto unmap; - desc++; + desc++; + } else { + desc = &priv->tx_ring[q][entry]; + len = skb->len; + dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; + } desc->ds_tagl = cpu_to_le16(len); desc->dptr = cpu_to_le32(dma_addr); @@ -1549,9 +1575,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (q == RAVB_NC) { ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); if (!ts_skb) { - desc--; - dma_unmap_single(ndev->dev.parent, dma_addr, len, - DMA_TO_DEVICE); + if (num_tx_desc > 1) { + desc--; + dma_unmap_single(ndev->dev.parent, dma_addr, + len, DMA_TO_DEVICE); + } goto unmap; } ts_skb->skb = skb; @@ -1568,15 +1596,18 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); /* Descriptor type must be set after all the above writes */ dma_wmb(); - desc->die_dt = DT_FEND; - desc--; - desc->die_dt = DT_FSTART; - + if (num_tx_desc > 1) { + desc->die_dt = DT_FEND; + desc--; + desc->die_dt = DT_FSTART; + } else { + desc->die_dt = DT_FSINGLE; + } ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); - priv->cur_tx[q] += NUM_TX_DESC; + priv->cur_tx[q] += num_tx_desc; if (priv->cur_tx[q] - priv->dirty_tx[q] > - (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && + (priv->num_tx_ring[q] - 1) * num_tx_desc && !ravb_tx_free(ndev, q, true)) netif_stop_subqueue(ndev, q); @@ -1590,7 +1621,7 @@ unmap: le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); drop: dev_kfree_skb_any(skb); - priv->tx_skb[q][entry / NUM_TX_DESC] = NULL; + priv->tx_skb[q][entry / num_tx_desc] = NULL; goto exit; } @@ -2076,6 +2107,9 @@ static int ravb_probe(struct platform_device *pdev) ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; + priv->num_tx_desc = chip_id == RCAR_GEN2 ? 
+ NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3; + /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; ndev->ethtool_ops = &ravb_ethtool_ops; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index aeafdb9ac015..beb06628f22d 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -371,7 +371,7 @@ static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info, static struct rocker_desc_info * rocker_desc_head_get(const struct rocker_dma_ring_info *info) { - static struct rocker_desc_info *desc_info; + struct rocker_desc_info *desc_info; u32 head = __pos_inc(info->head, info->size); desc_info = &info->desc_info[info->head]; @@ -402,7 +402,7 @@ static void rocker_desc_head_set(const struct rocker *rocker, static struct rocker_desc_info * rocker_desc_tail_get(struct rocker_dma_ring_info *info) { - static struct rocker_desc_info *desc_info; + struct rocker_desc_info *desc_info; if (info->tail == info->head) return NULL; /* nothing to be done between head and tail */ @@ -2728,6 +2728,7 @@ rocker_fdb_offload_notify(struct rocker_port *rocker_port, info.addr = recv_info->addr; info.vid = recv_info->vid; + info.offloaded = true; call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, rocker_port->dev, &info.info); } diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index a9da1ad4b4f2..690aee88f0eb 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -298,8 +298,8 @@ static int sxgbe_init_phy(struct net_device *ndev) /* Stop Advertising 1000BASE Capability if interface is not GMII */ if ((phy_iface == PHY_INTERFACE_MODE_MII) || (phy_iface == PHY_INTERFACE_MODE_RMII)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_1000); + if (phydev->phy_id == 0) { phy_disconnect(phydev); return -ENODEV; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3d0dd39c289e..98fe7e762e17 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3821,7 +3821,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) { struct efx_nic *efx = pci_get_drvdata(pdev); pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; - int rc; if (pci_enable_device(pdev)) { netif_err(efx, hw, efx->net_dev, @@ -3829,13 +3828,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) status = PCI_ERS_RESULT_DISCONNECT; } - rc = pci_cleanup_aer_uncorrect_error_status(pdev); - if (rc) { - netif_err(efx, hw, efx->net_dev, - "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); - /* Non-fatal error. Continue. 
*/ - } - return status; } diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 03e2455c502e..8b1f94d7a6c5 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -3160,7 +3160,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev) { struct ef4_nic *efx = pci_get_drvdata(pdev); pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; - int rc; if (pci_enable_device(pdev)) { netif_err(efx, hw, efx->net_dev, @@ -3168,13 +3167,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev) status = PCI_ERS_RESULT_DISCONNECT; } - rc = pci_cleanup_aer_uncorrect_error_status(pdev); - if (rc) { - netif_err(efx, hw, efx->net_dev, - "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); - /* Non-fatal error. Continue. */ - } - return status; } diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index b1b53f6c452f..8355dfbb8ec3 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -513,7 +513,8 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) * now, or set the card to generates an interrupt when ready * for the packet. */ -static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int free; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index b944828f9ea3..4823b6a51134 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -638,7 +638,8 @@ done: if (!THROTTLE_TX_PKTS) * now, or set the card to generates an interrupt when ready * for the packet. 
*/ -static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; @@ -2446,8 +2447,7 @@ static int smc_drv_remove(struct platform_device *pdev) static int smc_drv_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); if (ndev) { if (netif_running(ndev)) { diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index f0afb88d7bc2..99a5a8a7c777 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1048,10 +1048,10 @@ static int smsc911x_mii_probe(struct net_device *dev) phy_attached_info(phydev); + phy_set_max_speed(phydev, SPEED_100); + /* mask with MAC supported features */ - phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); pdata->last_duplex = -1; pdata->last_carrier = -1; @@ -1786,7 +1786,8 @@ static int smsc911x_stop(struct net_device *dev) } /* Entry point for transmitting a packet */ -static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); unsigned int freespace; diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 2fa3c1d03abc..9b6366b20110 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -1135,10 +1135,10 @@ static int smsc9420_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } + phy_set_max_speed(phydev, SPEED_100); + /* mask with MAC supported features */ - phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); phy_attached_info(phydev); diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 7aa5ebb6766c..d9d0d03e4ce7 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -274,6 +274,7 @@ struct netsec_priv { struct clk *clk; u32 msg_enable; u32 freq; + u32 phy_addr; bool rx_cksum_offload_flag; }; @@ -431,9 +432,12 @@ static int netsec_mac_update_to_phy_state(struct netsec_priv *priv) return 0; } +static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr); + static int netsec_phy_write(struct mii_bus *bus, int phy_addr, int reg, u16 val) { + int status; struct netsec_priv *priv = bus->priv; if (netsec_mac_write(priv, GMAC_REG_GDR, val)) @@ -446,8 +450,19 @@ static int netsec_phy_write(struct mii_bus *bus, GMAC_REG_SHIFT_CR_GAR))) return -ETIMEDOUT; - return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, - NETSEC_GMAC_GAR_REG_GB); + status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, + NETSEC_GMAC_GAR_REG_GB); + + /* Developerbox implements RTL8211E PHY and there is + * a compatibility problem with F_GMAC4. + * RTL8211E expects MDC clock must be kept toggling for several + * clock cycle with MDIO high before entering the IDLE state. + * To meet this requirement, netsec driver needs to issue dummy + * read(e.g. read PHYID1(offset 0x2) register) right after write. 
+ */ + netsec_phy_read(bus, phy_addr, MII_PHYSID1); + + return status; } static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr) @@ -735,8 +750,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) u16 idx = dring->tail; struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); - if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) + if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { + /* reading the register clears the irq */ + netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); break; + } /* This barrier is needed to keep us from reading * any other fields out of the netsec_de until we have @@ -937,6 +955,9 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) dring->head = 0; dring->tail = 0; dring->pkt_cnt = 0; + + if (id == NETSEC_RING_TX) + netdev_reset_queue(priv->ndev); } static void netsec_free_dring(struct netsec_priv *priv, int id) @@ -1340,11 +1361,11 @@ static int netsec_netdev_stop(struct net_device *ndev) netsec_uninit_pkt_dring(priv, NETSEC_RING_TX); netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); - ret = netsec_reset_hardware(priv, false); - phy_stop(ndev->phydev); phy_disconnect(ndev->phydev); + ret = netsec_reset_hardware(priv, false); + pm_runtime_put_sync(priv->dev); return ret; @@ -1354,6 +1375,7 @@ static int netsec_netdev_init(struct net_device *ndev) { struct netsec_priv *priv = netdev_priv(ndev); int ret; + u16 data; ret = netsec_alloc_dring(priv, NETSEC_RING_TX); if (ret) @@ -1363,6 +1385,11 @@ static int netsec_netdev_init(struct net_device *ndev) if (ret) goto err1; + /* set phy power down */ + data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) | + BMCR_PDOWN; + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + ret = netsec_reset_hardware(priv, true); if (ret) goto err2; @@ -1412,7 +1439,7 @@ static const struct net_device_ops netsec_netdev_ops = { }; static int netsec_of_probe(struct platform_device *pdev, - struct netsec_priv *priv) + struct netsec_priv *priv, u32 *phy_addr) { priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!priv->phy_np) { @@ -1420,6 +1447,8 @@ static int netsec_of_probe(struct platform_device *pdev, return -EINVAL; } + *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np); + priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "phy_ref_clk not found\n"); @@ -1620,12 +1649,14 @@ static int netsec_probe(struct platform_device *pdev) } if (dev_of_node(&pdev->dev)) - ret = netsec_of_probe(pdev, priv); + ret = netsec_of_probe(pdev, priv, &phy_addr); else ret = netsec_acpi_probe(pdev, priv, &phy_addr); if (ret) goto free_ndev; + priv->phy_addr = phy_addr; + if (!priv->freq) { dev_err(&pdev->dev, "missing PHY reference clock frequency\n"); ret = -ENODEV; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f7ecceeb1e28..6732f5cbde08 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -461,16 +461,7 @@ static int ave_ethtool_set_pauseparam(struct net_device *ndev, priv->pause_rx = pause->rx_pause; priv->pause_tx = pause->tx_pause; - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (pause->rx_pause) - phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (pause->tx_pause) - phydev->advertising ^= ADVERTISED_Asym_Pause; - - if (pause->autoneg) { - if (netif_running(ndev)) - phy_start_aneg(phydev); - } + phy_set_asym_pause(phydev, pause->rx_pause, 
pause->tx_pause); return 0; } @@ -904,11 +895,11 @@ static void ave_rxfifo_reset(struct net_device *ndev) /* assert reset */ writel(AVE_GRR_RXFFR, priv->base + AVE_GRR); - usleep_range(40, 50); + udelay(50); /* negate reset */ writel(0, priv->base + AVE_GRR); - usleep_range(10, 20); + udelay(20); /* negate interrupt status */ writel(AVE_GI_RXOVF, priv->base + AVE_GISR); @@ -1125,11 +1116,8 @@ static void ave_phy_adjust_link(struct net_device *ndev) rmt_adv |= LPA_PAUSE_CAP; if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - if (phydev->advertising & ADVERTISED_Pause) - lcl_adv |= ADVERTISE_PAUSE_CAP; - if (phydev->advertising & ADVERTISED_Asym_Pause) - lcl_adv |= ADVERTISE_PAUSE_ASYM; + lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (cap & FLOW_CTRL_TX) txcr |= AVE_TXCR_FLOCTR; @@ -1223,11 +1211,10 @@ static int ave_init(struct net_device *ndev) phy_ethtool_get_wol(phydev, &wol); device_set_wakeup_capable(&ndev->dev, !!wol.supported); - if (!phy_interface_is_rgmii(phydev)) { - phydev->supported &= ~PHY_GBIT_FEATURES; - phydev->supported |= PHY_BASIC_FEATURES; - } - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + if (!phy_interface_is_rgmii(phydev)) + phy_set_max_speed(phydev, SPEED_100); + + phy_support_asym_pause(phydev); phy_attached_info(phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 0a80fa25afe3..d6bb953685fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -119,11 +119,23 @@ #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) #define XGMAC_TQS GENMASK(25, 16) #define XGMAC_TQS_SHIFT 16 +#define XGMAC_Q2TCMAP GENMASK(10, 8) +#define XGMAC_Q2TCMAP_SHIFT 8 #define XGMAC_TTC GENMASK(6, 4) #define XGMAC_TTC_SHIFT 4 #define XGMAC_TXQEN GENMASK(3, 2) #define XGMAC_TXQEN_SHIFT 2 #define XGMAC_TSF BIT(1) +#define XGMAC_MTL_TCx_ETS_CONTROL(x) (0x00001110 + (0x80 * (x))) +#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x) (0x00001118 + (0x80 * (x))) +#define XGMAC_MTL_TCx_SENDSLOPE(x) (0x0000111c + (0x80 * (x))) +#define XGMAC_MTL_TCx_HICREDIT(x) (0x00001120 + (0x80 * (x))) +#define XGMAC_MTL_TCx_LOCREDIT(x) (0x00001124 + (0x80 * (x))) +#define XGMAC_CC BIT(3) +#define XGMAC_TSA GENMASK(1, 0) +#define XGMAC_SP (0x0 << 0) +#define XGMAC_CBS (0x1 << 0) +#define XGMAC_ETS (0x2 << 0) #define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x))) #define XGMAC_RQS GENMASK(25, 16) #define XGMAC_RQS_SHIFT 16 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index d182f82f7b58..64b8cb88ea45 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -177,6 +177,23 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue, writel(value, ioaddr + reg); } +static void dwxgmac2_config_cbs(struct mac_device_info *hw, + u32 send_slope, u32 idle_slope, + u32 high_credit, u32 low_credit, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue)); + writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue)); + writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue)); + writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue)); + + value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); + value |= XGMAC_CC | XGMAC_CBS; + writel(value, ioaddr + 
XGMAC_MTL_TCx_ETS_CONTROL(queue)); +} + static int dwxgmac2_host_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { @@ -316,7 +333,7 @@ const struct stmmac_ops dwxgmac210_ops = { .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, .set_mtl_tx_queue_weight = NULL, .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma, - .config_cbs = NULL, + .config_cbs = dwxgmac2_config_cbs, .dump_regs = NULL, .host_irq_status = dwxgmac2_host_irq_status, .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 20909036e002..6c5092e7771c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -182,6 +182,9 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode, value |= 0x7 << XGMAC_TTC_SHIFT; } + /* Use static TC to Queue mapping */ + value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP; + value &= ~XGMAC_TXQEN; if (qmode != MTL_QUEUE_AVB) value |= 0x2 << XGMAC_TXQEN_SHIFT; @@ -374,6 +377,21 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan) writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); } +static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); + + value &= ~XGMAC_TXQEN; + if (qmode != MTL_QUEUE_AVB) { + value |= 0x2 << XGMAC_TXQEN_SHIFT; + writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel)); + } else { + value |= 0x1 << XGMAC_TXQEN_SHIFT; + } + + writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); +} + static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) { u32 value; @@ -407,5 +425,6 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = { .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr, .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr, .enable_tso = dwxgmac2_enable_tso, + .qmode = dwxgmac2_qmode, .set_bfsize = dwxgmac2_set_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 357309a6d6a5..81b966a8261b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -133,7 +133,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac4_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = stmmac_dwmac4_quirks, }, { @@ -150,7 +150,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, .mode = &dwmac4_ring_mode_ops, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = NULL, }, { @@ -167,7 +167,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, .mode = &dwmac4_ring_mode_ops, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = NULL, }, { @@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry { .mac = &dwxgmac210_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwxgmac2_setup, .quirks = NULL, }, diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index a7ffc73fffe8..abc3f85270cd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, 
- STMMAC_RING_MODE, 0, false, skb->len); + STMMAC_RING_MODE, 1, false, skb->len); tx_q->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); @@ -91,7 +91,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, - STMMAC_RING_MODE, 0, true, skb->len); + STMMAC_RING_MODE, 1, true, skb->len); } tx_q->cur_tx = entry; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 75896d6ba6e2..076a8be18d67 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -991,17 +991,20 @@ static int stmmac_init_phy(struct net_device *dev) if ((interface == PHY_INTERFACE_MODE_MII) || (interface == PHY_INTERFACE_MODE_RMII) || (max_speed < 1000 && max_speed > 0)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100); /* * Half-duplex mode not supported with multiqueue * half-duplex can only works with single queue */ - if (tx_cnt > 1) - phydev->supported &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_100baseT_Half | - SUPPORTED_10baseT_Half); + if (tx_cnt > 1) { + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + } /* * Broken HW is sometimes missing the pull-up resistor on the diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index d42f47f6c632..644e42c181ee 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -113,7 +113,7 @@ static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, } /* Wrappers to common functions */ -static int vsw_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t vsw_start_xmit(struct sk_buff *skb, struct net_device *dev) { return sunvnet_start_xmit_common(skb, dev, vsw_tx_port_find); } diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index f047b2797156..720b7ac77f3b 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -950,7 +950,8 @@ static void bigmac_tx_timeout(struct net_device *dev) } /* Put a packet on the wire. */ -static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); int len, entry; diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 06da2f59fcbf..863fd602fd33 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2999,7 +2999,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, /* Now make sure pci_dev cookie is there. */ #ifdef CONFIG_SPARC dp = pci_device_to_OF_node(pdev); - strcpy(prom_name, dp->name); + snprintf(prom_name, sizeof(prom_name), "%pOFn", dp); #else if (is_quattro_p(pdev)) strcpy(prom_name, "SUNW,qfe"); diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 7fe0d5e33922..1468fa0a54e9 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -570,7 +570,7 @@ out: } /* Get a packet queued to go onto the wire. 
*/ -static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct sunqe_buffers *qbufs = qep->buffers; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 12539b357a78..590172818b92 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -247,7 +247,7 @@ static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, } /* Wrappers to common functions */ -static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find); } diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index d8f4c3f28150..baa3088b475c 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1216,9 +1216,10 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) return skb; } -static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, - struct vnet_port *(*vnet_tx_port) - (struct sk_buff *, struct net_device *)) +static netdev_tx_t +vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) { struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; @@ -1321,9 +1322,10 @@ out_dropped: return NETDEV_TX_OK; } -int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, - struct vnet_port *(*vnet_tx_port) - (struct sk_buff *, struct net_device *)) +netdev_tx_t +sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) { struct vnet_port *port = NULL; struct vio_dring_state *dr; diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h index 1ea0b016580a..2b808d2482d6 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.h +++ b/drivers/net/ethernet/sun/sunvnet_common.h @@ -136,9 +136,10 @@ int sunvnet_close_common(struct net_device *dev); void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp); int sunvnet_set_mac_addr_common(struct net_device *dev, void *p); void sunvnet_tx_timeout_common(struct net_device *dev); -int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, - struct vnet_port *(*vnet_tx_port) - (struct sk_buff *, struct net_device *)); +netdev_tx_t +sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)); #ifdef CONFIG_NET_POLL_CONTROLLER void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp); #endif diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 832bce07c385..500f7ed8c58c 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -484,13 +484,13 @@ enum { }; #define CPSW_STAT(m) CPSW_STATS, \ - sizeof(((struct cpsw_hw_stats *)0)->m), \ + FIELD_SIZEOF(struct cpsw_hw_stats, m), \ offsetof(struct cpsw_hw_stats, m) #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ - sizeof(((struct cpdma_chan_stats *)0)->m), \ + FIELD_SIZEOF(struct cpdma_chan_stats, m), \ offsetof(struct cpdma_chan_stats, m) #define CPDMA_TX_STAT(m) 
CPDMA_TX_STATS, \ - sizeof(((struct cpdma_chan_stats *)0)->m), \ + FIELD_SIZEOF(struct cpdma_chan_stats, m), \ offsetof(struct cpdma_chan_stats, m) static const struct cpsw_stats cpsw_gstrings_stats[] = { @@ -570,16 +570,14 @@ static inline int cpsw_get_slave_port(u32 slave_num) return slave_num + 1; } -static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr) +static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr) { struct cpsw_common *cpsw = priv->cpsw; if (cpsw->data.dual_emac) { struct cpsw_slave *slave = cpsw->slaves + priv->emac_port; - int slave_port = cpsw_get_slave_port(slave->slave_num); - cpsw_ale_add_mcast(cpsw->ale, addr, - 1 << slave_port | ALE_PORT_HOST, + cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0); return; } @@ -642,6 +640,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1); + __dev_mc_unsync(ndev, NULL); /* Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); @@ -662,16 +661,35 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) } } -static void cpsw_ndo_set_rx_mode(struct net_device *ndev) +static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + cpsw_add_mcast(priv, addr); + return 0; +} + +static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr) { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; - int vid; + int vid, flags; - if (cpsw->data.dual_emac) + if (cpsw->data.dual_emac) { vid = cpsw->slaves[priv->emac_port].port_vlan; - else - vid = cpsw->data.default_vlan; + flags = ALE_VLAN; + } else { + vid = 0; + flags = 0; + } + + cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid); + return 0; +} + +static void cpsw_ndo_set_rx_mode(struct net_device *ndev) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ @@ -684,19 +702,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) } /* Restore allmulti on vlans if necessary */ - cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI); - - /* Clear all mcast from ALE */ - cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid); + cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI); - if (!netdev_mc_empty(ndev)) { - struct netdev_hw_addr *ha; - - /* program multicast address list into ALE register */ - netdev_for_each_mc_addr(ha, ndev) { - cpsw_add_mcast(priv, ha->addr); - } - } + __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr); } static void cpsw_intr_enable(struct cpsw_common *cpsw) @@ -1410,7 +1418,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask, port_mask, port_mask, 0); cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, - port_mask, ALE_VLAN, slave->port_vlan, 0); + ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0); cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan); @@ -1956,6 +1964,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) struct cpsw_common *cpsw = priv->cpsw; cpsw_info(priv, ifdown, "shutting down cpsw device\n"); + __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr); netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); @@ -2293,16 +2302,19 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, { int ret; int unreg_mcast_mask = 0; + int 
mcast_mask; u32 port_mask; struct cpsw_common *cpsw = priv->cpsw; if (cpsw->data.dual_emac) { port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST; + mcast_mask = ALE_PORT_HOST; if (priv->ndev->flags & IFF_ALLMULTI) - unreg_mcast_mask = port_mask; + unreg_mcast_mask = mcast_mask; } else { port_mask = ALE_ALL_PORTS; + mcast_mask = port_mask; if (priv->ndev->flags & IFF_ALLMULTI) unreg_mcast_mask = ALE_ALL_PORTS; @@ -2321,7 +2333,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, goto clean_vid; ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, - port_mask, ALE_VLAN, vid, 0); + mcast_mask, ALE_VLAN, vid, 0); if (ret != 0) goto clean_vlan_ucast; return 0; @@ -3658,8 +3670,7 @@ static int cpsw_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int cpsw_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct cpsw_common *cpsw = ndev_to_cpsw(ndev); if (cpsw->data.dual_emac) { @@ -3682,8 +3693,7 @@ static int cpsw_suspend(struct device *dev) static int cpsw_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct cpsw_common *cpsw = ndev_to_cpsw(ndev); /* Select default pin state */ diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 5766225a4ce1..798c989d5d93 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -136,7 +136,7 @@ static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr) addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8); } -static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr) +static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr) { int i; @@ -175,7 +175,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry) return idx; } -static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) +static int cpsw_ale_match_addr(struct cpsw_ale *ale, const u8 *addr, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; @@ -309,7 +309,7 @@ static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, } } -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, +int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port, int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; @@ -336,7 +336,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, } EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast); -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, +int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port, int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; @@ -352,7 +352,7 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, } EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast); -int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, +int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int flags, u16 vid, int mcast_state) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; @@ -386,7 +386,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, } EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast); -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, +int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; 
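The cpsw hunks above replace the driver's open-coded multicast walk in cpsw_ndo_set_rx_mode() with __dev_mc_sync()/__dev_mc_unsync(), so the networking core calls back into the driver only for addresses that were actually added to or removed from the interface since the last sync. Below is a minimal sketch of that pattern, not code from the patch: the foo_*() names are placeholders for this illustration, standing in for the real driver's ALE programming via cpsw_ale_add_mcast()/cpsw_ale_del_mcast().

#include <linux/netdevice.h>

static int foo_hw_add_mc(struct net_device *ndev, const u8 *addr)
{
        /* program one multicast address into the hardware filter */
        return 0;
}

static int foo_hw_del_mc(struct net_device *ndev, const u8 *addr)
{
        /* remove one multicast address from the hardware filter */
        return 0;
}

static void foo_ndo_set_rx_mode(struct net_device *ndev)
{
        /*
         * Walks ndev->mc and invokes the callbacks only for addresses
         * added or removed since the last call, instead of flushing and
         * reprogramming the whole list on every change.
         */
        __dev_mc_sync(ndev, foo_hw_add_mc, foo_hw_del_mc);
}

static int foo_ndo_stop(struct net_device *ndev)
{
        /* forget the synced addresses when the interface goes down */
        __dev_mc_unsync(ndev, foo_hw_del_mc);
        return 0;
}

/* only referenced by a full driver; marked so the sketch builds cleanly */
static const struct net_device_ops foo_netdev_ops __maybe_unused = {
        .ndo_set_rx_mode        = foo_ndo_set_rx_mode,
        .ndo_stop               = foo_ndo_stop,
};

The patch applies the same idea on teardown: cpsw_ndo_stop() and the promiscuous-mode path call __dev_mc_unsync() so the core's synced state stays consistent with the ALE multicast entries that were just flushed.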
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index d4fe9016429b..cd07a3e96d57 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -105,13 +105,13 @@ void cpsw_ale_start(struct cpsw_ale *ale); void cpsw_ale_stop(struct cpsw_ale *ale); int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid); -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, +int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port, int flags, u16 vid); -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, +int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port, int flags, u16 vid); -int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, +int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int flags, u16 vid, int mcast_state); -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, +int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int flags, u16 vid); int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index f270beebb428..9153db120352 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -2002,8 +2002,7 @@ static int davinci_emac_remove(struct platform_device *pdev) static int davinci_emac_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); if (netif_running(ndev)) emac_dev_stop(ndev); @@ -2013,8 +2012,7 @@ static int davinci_emac_suspend(struct device *dev) static int davinci_emac_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); if (netif_running(ndev)) emac_dev_open(ndev); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index a1d335a3c5e4..1f612268c998 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -225,17 +225,6 @@ static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap) return 0; } -static const char *netcp_node_name(struct device_node *node) -{ - const char *name; - - if (of_property_read_string(node, "label", &name) < 0) - name = node->name; - if (!name) - name = "unknown"; - return name; -} - /* Module management routines */ static int netcp_register_interface(struct netcp_intf *netcp) { @@ -267,8 +256,13 @@ static int netcp_module_probe(struct netcp_device *netcp_device, } for_each_available_child_of_node(devices, child) { - const char *name = netcp_node_name(child); + const char *name; + char node_name[32]; + if (of_property_read_string(node, "label", &name) < 0) { + snprintf(node_name, sizeof(node_name), "%pOFn", child); + name = node_name; + } if (!strcasecmp(module->name, name)) break; } @@ -2209,8 +2203,8 @@ static int netcp_probe(struct platform_device *pdev) for_each_available_child_of_node(interfaces, child) { ret = netcp_create_interface(netcp_device, child); if (ret) { - dev_err(dev, "could not create interface(%s)\n", - child->name); + dev_err(dev, "could not create interface(%pOFn)\n", + child); goto probe_quit_interface; } } diff --git a/drivers/net/ethernet/ti/netcp_ethss.c 
b/drivers/net/ethernet/ti/netcp_ethss.c index 72b98e27c992..0397ccb6597e 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3137,15 +3137,15 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, for_each_child_of_node(node, port) { slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL); if (!slave) { - dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n", - port->name); + dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n", + port); continue; } if (init_slave(gbe_dev, slave, port)) { dev_err(dev, - "Failed to initialize secondary port(%s), skipping...\n", - port->name); + "Failed to initialize secondary port(%pOFn), skipping...\n", + port); devm_kfree(dev, slave); continue; } @@ -3239,8 +3239,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't xlate xgbe of node(%s) ss address at %d\n", - node->name, XGBE_SS_REG_INDEX); + "Can't xlate xgbe of node(%pOFn) ss address at %d\n", + node, XGBE_SS_REG_INDEX); return ret; } @@ -3254,8 +3254,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't xlate xgbe of node(%s) sm address at %d\n", - node->name, XGBE_SM_REG_INDEX); + "Can't xlate xgbe of node(%pOFn) sm address at %d\n", + node, XGBE_SM_REG_INDEX); return ret; } @@ -3269,8 +3269,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't xlate xgbe serdes of node(%s) address at %d\n", - node->name, XGBE_SERDES_REG_INDEX); + "Can't xlate xgbe serdes of node(%pOFn) address at %d\n", + node, XGBE_SERDES_REG_INDEX); return ret; } @@ -3347,8 +3347,8 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of node(%s) of gbe ss address at %d\n", - node->name, GBE_SS_REG_INDEX); + "Can't translate of node(%pOFn) of gbe ss address at %d\n", + node, GBE_SS_REG_INDEX); return ret; } @@ -3372,8 +3372,8 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of gbe node(%s) address at index %d\n", - node->name, GBE_SGMII34_REG_INDEX); + "Can't translate of gbe node(%pOFn) address at index %d\n", + node, GBE_SGMII34_REG_INDEX); return ret; } @@ -3388,8 +3388,8 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of gbe node(%s) address at index %d\n", - node->name, GBE_SM_REG_INDEX); + "Can't translate of gbe node(%pOFn) address at index %d\n", + node, GBE_SM_REG_INDEX); return ret; } @@ -3498,8 +3498,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of gbenu node(%s) addr at index %d\n", - node->name, GBENU_SM_REG_INDEX); + "Can't translate of gbenu node(%pOFn) addr at index %d\n", + node, GBENU_SM_REG_INDEX); return ret; } @@ -3642,7 +3642,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, gbe_dev->ss_regs); } 
else { - dev_err(dev, "unknown GBE node(%s)\n", node->name); + dev_err(dev, "unknown GBE node(%pOFn)\n", node); ret = -ENODEV; } @@ -3667,8 +3667,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, for_each_child_of_node(interfaces, interface) { ret = of_property_read_u32(interface, "slave-port", &slave_num); if (ret) { - dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n", - interface->name); + dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n", + interface); continue; } gbe_dev->num_slaves++; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 88d74aef218a..75237c81c63d 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -845,9 +845,9 @@ static int gelic_card_kick_txdma(struct gelic_card *card, * @skb: packet to send out * @netdev: interface device structure * - * returns 0 on success, <0 on failure + * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure */ -int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) +netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct gelic_card *card = netdev_card(netdev); struct gelic_descr *descr; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index 003d0452d9cb..fbbf9b54b173 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -370,7 +370,7 @@ void gelic_card_up(struct gelic_card *card); void gelic_card_down(struct gelic_card *card); int gelic_net_open(struct net_device *netdev); int gelic_net_stop(struct net_device *netdev); -int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); void gelic_net_set_multi(struct net_device *netdev); void gelic_net_tx_timeout(struct net_device *netdev); int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card); diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 302079e22b06..00ab417694ad 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1094,7 +1094,7 @@ static int gelic_wl_get_encode(struct net_device *netdev, struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); struct iw_point *enc = &data->encoding; unsigned long irqflag; - unsigned int key_index, index_specified; + unsigned int key_index; int ret = 0; pr_debug("%s: <-\n", __func__); @@ -1105,13 +1105,10 @@ static int gelic_wl_get_encode(struct net_device *netdev, return -EINVAL; spin_lock_irqsave(&wl->lock, irqflag); - if (key_index) { - index_specified = 1; + if (key_index) key_index--; - } else { - index_specified = 0; + else key_index = wl->current_key; - } if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { switch (wl->auth_method) { diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index d925b8203996..23417266b7ec 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -880,9 +880,9 @@ out: * @skb: packet to send out * @netdev: interface device structure * - * returns 0 on success, !0 on failure + * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure */ -static int +static netdev_tx_t spider_net_xmit(struct sk_buff *skb, 
struct net_device *netdev) { int cnt; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index cce9c9ed46aa..6a71c2c0f17d 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -474,7 +474,8 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_ /* Index to functions, as function prototypes. */ static int tc35815_open(struct net_device *dev); -static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t tc35815_send_packet(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t tc35815_interrupt(int irq, void *dev_id); static int tc35815_rx(struct net_device *dev, int limit); static int tc35815_poll(struct napi_struct *napi, int budget); @@ -628,7 +629,7 @@ static int tc_mii_probe(struct net_device *dev) phy_attached_info(phydev); /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); dropmask = 0; if (options.speed == 10) dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; @@ -1248,7 +1249,8 @@ tc35815_open(struct net_device *dev) * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. */ -static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct TxFD *txfd; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 80fdbff67d82..f9da5d6172e3 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -661,8 +661,7 @@ static int w5300_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int w5300_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct w5300_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { @@ -676,8 +675,7 @@ static int w5300_suspend(struct device *dev) static int w5300_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct w5300_priv *priv = netdev_priv(ndev); if (!netif_running(ndev)) { diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 60abc9250f56..2241f9897092 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -674,7 +674,8 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) return 0; } -static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct cdmac_bd *cur_p; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index f24f48f33802..12a14609ec47 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -653,7 +653,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, * start the transmission. Additionally if checksum offloading is supported, * it populates AXI Stream Control fields with appropriate values. 
*/ -static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { u32 ii; u32 num_frag; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 42f1f518dad6..639e3e99af46 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -941,8 +941,7 @@ static int xemaclite_open(struct net_device *dev) } /* EmacLite doesn't support giga-bit speeds */ - lp->phy_dev->supported &= (PHY_BASIC_FEATURES); - lp->phy_dev->advertising = lp->phy_dev->supported; + phy_set_max_speed(lp->phy_dev, SPEED_100); /* Don't advertise 1000BASE-T Full/Half duplex speeds */ phy_write(lp->phy_dev, MII_CTRL1000, 0); @@ -1020,9 +1019,10 @@ static int xemaclite_close(struct net_device *dev) * deferred and the Tx queue is stopped so that the deferred socket buffer can * be transmitted when the Emaclite device is free to transmit data. * - * Return: 0, always. + * Return: NETDEV_TX_OK, always. */ -static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) +static netdev_tx_t +xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *new_skb; @@ -1044,7 +1044,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) /* Take the time stamp now, since we can't do this in an ISR. */ skb_tx_timestamp(new_skb); spin_unlock_irqrestore(&lp->reset_lock, flags); - return 0; + return NETDEV_TX_OK; } spin_unlock_irqrestore(&lp->reset_lock, flags); @@ -1053,7 +1053,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) dev->stats.tx_bytes += len; dev_consume_skb_any(new_skb); - return 0; + return NETDEV_TX_OK; } /** |
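Two conversions recur throughout the hunks above: ndo_start_xmit implementations now return netdev_tx_t instead of int (returning NETDEV_TX_OK/NETDEV_TX_BUSY rather than a bare 0), and PHY probe paths call phy_set_max_speed() instead of masking phydev->supported with PHY_BASIC_FEATURES. The sketch below shows both patterns in isolation; the bar_*() names and the ring_full check are illustrative placeholders, not code from any of the drivers touched above.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/phy.h>

static netdev_tx_t bar_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        bool ring_full = false;         /* stand-in for a real descriptor check */

        if (ring_full) {
                netif_stop_queue(ndev);
                /* the core keeps the skb and retries the transmit later */
                return NETDEV_TX_BUSY;
        }

        /* a real driver hands the skb to the hardware here */
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += skb->len;
        dev_consume_skb_any(skb);

        return NETDEV_TX_OK;            /* was "return 0" before the conversion */
}

/* only referenced by a full driver; marked so the sketch builds cleanly */
static const struct net_device_ops bar_netdev_ops __maybe_unused = {
        .ndo_start_xmit = bar_start_xmit,       /* prototype now matches the hook */
};

static int __maybe_unused bar_mii_probe(struct phy_device *phydev)
{
        /*
         * Replaces the old "phydev->supported &= PHY_BASIC_FEATURES;
         * phydev->advertising = phydev->supported;" pair for MACs that
         * cannot do gigabit.
         */
        phy_set_max_speed(phydev, SPEED_100);
        return 0;
}

Using netdev_tx_t keeps the transmit prototypes declared in the headers (sunvnet_common.h, ps3_gelic_net.h) consistent with the .ndo_start_xmit hook they are wired into, which is why the headers are updated together with the implementations.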