-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h              7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h  99
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c     525
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h      17
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c     3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c    801
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h 456
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h   24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c       964
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c          2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c      161
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h       1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c    1361
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h      50
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c         1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h         1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h        52
17 files changed, 3326 insertions(+), 1199 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 868f4a1d0f72..9cf233d085d8 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -62,6 +62,7 @@ extern const char ice_drv_ver[];
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
+#define ICE_INVAL_VFID 256
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -122,6 +123,7 @@ struct ice_sw {
enum ice_state {
__ICE_DOWN,
__ICE_NEEDS_RESTART,
+ __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
__ICE_RESET_RECOVERY_PENDING, /* set by driver when reset starts */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
@@ -132,9 +134,11 @@ enum ice_state {
__ICE_SUSPENDED, /* set on module remove path */
__ICE_RESET_FAILED, /* set by reset/rebuild */
__ICE_ADMINQ_EVENT_PENDING,
+ __ICE_MDD_EVENT_PENDING,
__ICE_FLTR_OVERFLOW_PROMISC,
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
+ __ICE_SERVICE_DIS,
__ICE_STATE_NBITS /* must be last */
};
@@ -270,6 +274,9 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded; /* has previous stats been loaded */
+ u32 tx_timeout_count;
+ unsigned long tx_timeout_last_recovery;
+ u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
};
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index a0614f472658..f8dfd675486c 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -443,6 +443,8 @@ struct ice_aqc_vsi_props {
u8 reserved[24];
};
+#define ICE_MAX_NUM_RECIPES 64
+
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
struct ice_aqc_sw_rules {
@@ -771,9 +773,8 @@ struct ice_aqc_layer_props {
u8 chunk_size;
__le16 max_device_nodes;
__le16 max_pf_nodes;
- u8 rsvd0[2];
- __le16 max_shared_rate_lmtr;
- __le16 max_children;
+ u8 rsvd0[4];
+ __le16 max_sibl_grp_sz;
__le16 max_cir_rl_profiles;
__le16 max_eir_rl_profiles;
__le16 max_srl_profiles;
@@ -919,9 +920,11 @@ struct ice_aqc_set_phy_cfg_data {
u8 caps;
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
-#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
-#define ICE_AQ_PHY_ENA_LINK BIT(3)
-#define ICE_AQ_PHY_ENA_ATOMIC_LINK BIT(5)
+#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
+#define ICE_AQ_PHY_ENA_LINK BIT(3)
+#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define ICE_AQ_PHY_ENA_LESM BIT(6)
+#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7)
u8 low_power_ctrl;
__le16 eee_cap; /* Value from ice_aqc_get_phy_caps */
__le16 eeer_value;
@@ -1203,6 +1206,84 @@ struct ice_aqc_dis_txq {
struct ice_aqc_dis_txq_item qgrps[1];
};
+/* Configure Firmware Logging Command (indirect 0xFF09)
+ * Logging Information Read Response (indirect 0xFF10)
+ * Note: The 0xFF10 command has no input parameters.
+ */
+struct ice_aqc_fw_logging {
+ u8 log_ctrl;
+#define ICE_AQC_FW_LOG_AQ_EN BIT(0)
+#define ICE_AQC_FW_LOG_UART_EN BIT(1)
+ u8 rsvd0;
+ u8 log_ctrl_valid; /* Not used by 0xFF10 Response */
+#define ICE_AQC_FW_LOG_AQ_VALID BIT(0)
+#define ICE_AQC_FW_LOG_UART_VALID BIT(1)
+ u8 rsvd1[5];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+enum ice_aqc_fw_logging_mod {
+ ICE_AQC_FW_LOG_ID_GENERAL = 0,
+ ICE_AQC_FW_LOG_ID_CTRL,
+ ICE_AQC_FW_LOG_ID_LINK,
+ ICE_AQC_FW_LOG_ID_LINK_TOPO,
+ ICE_AQC_FW_LOG_ID_DNL,
+ ICE_AQC_FW_LOG_ID_I2C,
+ ICE_AQC_FW_LOG_ID_SDP,
+ ICE_AQC_FW_LOG_ID_MDIO,
+ ICE_AQC_FW_LOG_ID_ADMINQ,
+ ICE_AQC_FW_LOG_ID_HDMA,
+ ICE_AQC_FW_LOG_ID_LLDP,
+ ICE_AQC_FW_LOG_ID_DCBX,
+ ICE_AQC_FW_LOG_ID_DCB,
+ ICE_AQC_FW_LOG_ID_NETPROXY,
+ ICE_AQC_FW_LOG_ID_NVM,
+ ICE_AQC_FW_LOG_ID_AUTH,
+ ICE_AQC_FW_LOG_ID_VPD,
+ ICE_AQC_FW_LOG_ID_IOSF,
+ ICE_AQC_FW_LOG_ID_PARSER,
+ ICE_AQC_FW_LOG_ID_SW,
+ ICE_AQC_FW_LOG_ID_SCHEDULER,
+ ICE_AQC_FW_LOG_ID_TXQ,
+ ICE_AQC_FW_LOG_ID_RSVD,
+ ICE_AQC_FW_LOG_ID_POST,
+ ICE_AQC_FW_LOG_ID_WATCHDOG,
+ ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ ICE_AQC_FW_LOG_ID_MNG,
+ ICE_AQC_FW_LOG_ID_MAX,
+};
+
+/* This is the buffer for both of the logging commands.
+ * The entry array size depends on the datalen parameter in the descriptor.
+ * There will be a total of datalen / 2 entries.
+ */
+struct ice_aqc_fw_logging_data {
+ __le16 entry[1];
+#define ICE_AQC_FW_LOG_ID_S 0
+#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S)
+
+#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */
+#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */
+
+#define ICE_AQC_FW_LOG_EN_S 12
+#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S)
+#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */
+#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */
+#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */
+#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */
+};
+
+/* Get/Clear FW Log (indirect 0xFF11) */
+struct ice_aqc_get_clear_fw_log {
+ u8 flags;
+#define ICE_AQC_FW_LOG_CLEAR BIT(0)
+#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1)
+ u8 rsvd1[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1252,6 +1333,9 @@ struct ice_aq_desc {
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
+ struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
+ struct ice_aqc_fw_logging fw_logging;
+ struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
@@ -1349,6 +1433,9 @@ enum ice_adminq_opc {
/* TX queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
+
+ /* debug commands */
+ ice_aqc_opc_fw_logging = 0xFF09,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
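
Each __le16 entry in struct ice_aqc_fw_logging_data above packs a module ID (bits 11:0) and its enabled event types (bits 15:12); ICE_FW_LOG_DESC_SIZE() in ice_common.c below sizes the buffer for a given entry count. As a minimal sketch (assuming a 0xFF10 response of datalen bytes; the helper name is hypothetical), the entries could be decoded like so:

static void ice_dbg_dump_fw_log_cfg(struct ice_hw *hw,
				    struct ice_aqc_fw_logging_data *data,
				    u16 datalen)
{
	u16 i;

	/* datalen / 2 entries, per the struct comment above */
	for (i = 0; i < datalen / 2; i++) {
		u16 v = le16_to_cpu(data->entry[i]);
		u16 module = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
		u16 events = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

		ice_debug(hw, ICE_DBG_AQ_MSG, "module %u: events 0x%x\n",
			  module, events);
	}
}
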
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 661beea6af79..0847dbf9d42f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -7,16 +7,16 @@
#define ICE_PF_RESET_WAIT_COUNT 200
-#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
- wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
+#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
+ wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
((ICE_RX_OPC_MDID << \
GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
(((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
-#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
- wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
+#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
+ wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
(((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
(((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
@@ -125,7 +125,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
*
* Returns the various PHY capabilities supported on the Port (0x0600)
*/
-static enum ice_status
+enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *pcaps,
struct ice_sq_cd *cd)
@@ -290,30 +290,85 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}
/**
- * ice_init_flex_parser - initialize rx flex parser
+ * ice_init_flex_flags
* @hw: pointer to the hardware structure
+ * @prof_id: Rx Descriptor Builder profile ID
*
- * Function to initialize flex descriptors
+ * Function to initialize Rx flex flags
*/
-static void ice_init_flex_parser(struct ice_hw *hw)
+static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
u8 idx = 0;
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
- idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
- ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
+ * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
+ * flexiflags1[3:0] - Not used for flag programming
+ * flexiflags2[7:0] - Tunnel and VLAN types
+ * 2 invalid fields in last index
+ */
+ switch (prof_id) {
+ /* Rx flex flags are currently programmed for the NIC profiles only.
+ * Different flag bit programming configurations can be added per
+ * profile as needed.
+ */
+ case ICE_RXDID_FLEX_NIC:
+ case ICE_RXDID_FLEX_NIC_2:
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
+ ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
+ ICE_RXFLG_FIN, idx++);
+ /* flex flag 1 is not used for flexi-flag programming, skipping
+ * these four FLG64 bits.
+ */
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
+ ICE_RXFLG_EVLAN_x9100, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
+ ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
+ ICE_RXFLG_TNL0, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ break;
+
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "Flag programming for profile ID %d not supported\n",
+ prof_id);
+ }
+}
+
+/**
+ * ice_init_flex_flds
+ * @hw: pointer to the hardware structure
+ * @prof_id: Rx Descriptor Builder profile ID
+ *
+ * Function to initialize flex descriptors
+ */
+static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
+{
+ enum ice_flex_rx_mdid mdid;
+
+ switch (prof_id) {
+ case ICE_RXDID_FLEX_NIC:
+ case ICE_RXDID_FLEX_NIC_2:
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
+
+ mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
+ ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
+
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
+
+ ice_init_flex_flags(hw, prof_id);
+ break;
+
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "Field init for profile ID %d not supported\n",
+ prof_id);
+ }
}
/**
@@ -333,20 +388,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- mutex_init(&sw->mac_list_lock);
- INIT_LIST_HEAD(&sw->mac_list_head);
-
- mutex_init(&sw->vlan_list_lock);
- INIT_LIST_HEAD(&sw->vlan_list_head);
-
- mutex_init(&sw->eth_m_list_lock);
- INIT_LIST_HEAD(&sw->eth_m_list_head);
-
- mutex_init(&sw->promisc_list_lock);
- INIT_LIST_HEAD(&sw->promisc_list_head);
-
- mutex_init(&sw->mac_vlan_list_lock);
- INIT_LIST_HEAD(&sw->mac_vlan_list_head);
+ ice_init_def_sw_recp(hw);
return 0;
}
@@ -360,22 +402,201 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
struct ice_switch_info *sw = hw->switch_info;
struct ice_vsi_list_map_info *v_pos_map;
struct ice_vsi_list_map_info *v_tmp_map;
+ struct ice_sw_recipe *recps;
+ u8 i;
list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
list_entry) {
list_del(&v_pos_map->list_entry);
devm_kfree(ice_hw_to_dev(hw), v_pos_map);
}
+ recps = hw->switch_info->recp_list;
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+
+ recps[i].root_rid = i;
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules, list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+ }
- mutex_destroy(&sw->mac_list_lock);
- mutex_destroy(&sw->vlan_list_lock);
- mutex_destroy(&sw->eth_m_list_lock);
- mutex_destroy(&sw->promisc_list_lock);
- mutex_destroy(&sw->mac_vlan_list_lock);
-
+ devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
devm_kfree(ice_hw_to_dev(hw), sw);
}
+#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
+ (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
+#define ICE_FW_LOG_DESC_SIZE_MAX \
+ ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
+
+/**
+ * ice_cfg_fw_log - configure FW logging
+ * @hw: pointer to the hw struct
+ * @enable: enable certain FW logging events if true, disable all if false
+ *
+ * This function enables/disables the FW logging via Rx CQ events and a UART
+ * port based on predetermined configurations. FW logging via the Rx CQ can be
+ * enabled/disabled for individual PFs. However, FW logging via the UART can
+ * only be enabled/disabled for all PFs on the same device.
+ *
+ * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
+ * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
+ * before initializing the device.
+ *
+ * When re/configuring FW logging, callers need to update the "cfg" elements of
+ * the hw->fw_log.evnts array with the desired logging event configurations for
+ * modules of interest. When disabling FW logging completely, the callers can
+ * just pass false in the "enable" parameter. On completion, the function will
+ * update the "cur" element of the hw->fw_log.evnts array with the resulting
+ * logging event configurations of the modules that are being re/configured. FW
+ * logging modules that are not part of a reconfiguration operation retain their
+ * previous states.
+ *
+ * Before resetting the device, it is recommended that the driver disables FW
+ * logging before shutting down the control queue. When disabling FW logging
+ * ("enable" = false), the latest configurations of FW logging events stored in
+ * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
+ * a device reset.
+ *
+ * When enabling FW logging to emit log messages via the Rx CQ during the
+ * device's initialization phase, a mechanism alternative to interrupt handlers
+ * needs to be used to extract FW log messages from the Rx CQ periodically and
+ * to prevent the Rx CQ from being full and stalling other types of control
+ * messages from FW to SW. Interrupts are typically disabled during the device's
+ * initialization phase.
+ */
+static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
+{
+ struct ice_aqc_fw_logging_data *data = NULL;
+ struct ice_aqc_fw_logging *cmd;
+ enum ice_status status = 0;
+ u16 i, chgs = 0, len = 0;
+ struct ice_aq_desc desc;
+ u8 actv_evnts = 0;
+ void *buf = NULL;
+
+ if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
+ return 0;
+
+ /* Disable FW logging only when the control queue is still responsive */
+ if (!enable &&
+ (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
+ return 0;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
+ cmd = &desc.params.fw_logging;
+
+ /* Indicate which controls are valid */
+ if (hw->fw_log.cq_en)
+ cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
+
+ if (hw->fw_log.uart_en)
+ cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
+
+ if (enable) {
+ /* Fill in an array of entries with FW logging modules and
+ * logging events being reconfigured.
+ */
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ u16 val;
+
+ /* Keep track of enabled event types */
+ actv_evnts |= hw->fw_log.evnts[i].cfg;
+
+ if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
+ continue;
+
+ if (!data) {
+ data = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_FW_LOG_DESC_SIZE_MAX,
+ GFP_KERNEL);
+ if (!data)
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ val = i << ICE_AQC_FW_LOG_ID_S;
+ val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
+ data->entry[chgs++] = cpu_to_le16(val);
+ }
+
+ /* Only enable FW logging if at least one module is specified.
+ * If FW logging is currently enabled but no modules are enabled
+ * to emit log messages, disable FW logging altogether.
+ */
+ if (actv_evnts) {
+ /* Leave if there is effectively no change */
+ if (!chgs)
+ goto out;
+
+ if (hw->fw_log.cq_en)
+ cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
+
+ if (hw->fw_log.uart_en)
+ cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
+
+ buf = data;
+ len = ICE_FW_LOG_DESC_SIZE(chgs);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ }
+ }
+
+ status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
+ if (!status) {
+ /* Update the current configuration to reflect events enabled.
+ * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
+ * logging mode is enabled for the device. They do not reflect
+ * actual modules being enabled to emit log messages. So, their
+ * values remain unchanged even when all modules are disabled.
+ */
+ u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
+
+ hw->fw_log.actv_evnts = actv_evnts;
+ for (i = 0; i < cnt; i++) {
+ u16 v, m;
+
+ if (!enable) {
+ /* When disabling all FW logging events as part
+ * of device's de-initialization, the original
+ * configurations are retained, and can be used
+ * to reconfigure FW logging later if the device
+ * is re-initialized.
+ */
+ hw->fw_log.evnts[i].cur = 0;
+ continue;
+ }
+
+ v = le16_to_cpu(data->entry[i]);
+ m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
+ hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
+ }
+ }
+
+out:
+ if (data)
+ devm_kfree(ice_hw_to_dev(hw), data);
+
+ return status;
+}
+
+/**
+ * ice_output_fw_log
+ * @hw: pointer to the hw struct
+ * @desc: pointer to the AQ message descriptor
+ * @buf: pointer to the buffer accompanying the AQ message
+ *
+ * Formats a FW Log message and outputs it via the standard driver logs.
+ */
+void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
+{
+ ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
+ ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
+ le16_to_cpu(desc->datalen));
+ ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
+}
+
/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
@@ -410,6 +631,11 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
+ /* Enable FW logging. Not fatal if this fails. */
+ status = ice_cfg_fw_log(hw, true);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
+
status = ice_clear_pf_cfg(hw);
if (status)
goto err_unroll_cqinit;
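
Per the ice_cfg_fw_log() comment above, the "cq_en"/"uart_en" bits and the per-module "cfg" masks must be populated before ice_init_hw() runs so the call above picks them up. A minimal setup sketch, assuming the hw->fw_log fields named in that comment (the helper name is hypothetical):

static void ice_example_fw_log_setup(struct ice_hw *hw)
{
	/* emit FW log messages via the Rx admin queue only */
	hw->fw_log.cq_en = true;
	hw->fw_log.uart_en = false;

	/* request init- and error-level events from the link module;
	 * the EN bits are stored unshifted in the 4-bit "cfg" mask
	 */
	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_LINK].cfg =
		(ICE_AQC_FW_LOG_INIT_EN | ICE_AQC_FW_LOG_ERR_EN) >>
		ICE_AQC_FW_LOG_EN_S;
}
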
@@ -472,6 +698,13 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
+ /* need a valid SW entry point to build a Tx tree */
+ if (!hw->sw_entry_point_layer) {
+ ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
+ status = ICE_ERR_CFG;
+ goto err_unroll_sched;
+ }
+
status = ice_init_fltr_mgmt_struct(hw);
if (status)
goto err_unroll_sched;
@@ -494,7 +727,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
- ice_init_flex_parser(hw);
+ ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
+ ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
return 0;
@@ -515,15 +749,18 @@ err_unroll_cqinit:
*/
void ice_deinit_hw(struct ice_hw *hw)
{
+ ice_cleanup_fltr_mgmt_struct(hw);
+
ice_sched_cleanup_all(hw);
- ice_shutdown_all_ctrlq(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
hw->port_info = NULL;
}
- ice_cleanup_fltr_mgmt_struct(hw);
+ /* Attempt to disable FW logging before shutting down control queues */
+ ice_cfg_fw_log(hw, false);
+ ice_shutdown_all_ctrlq(hw);
}
/**
@@ -652,6 +889,8 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
val = GLGEN_RTRIG_GLOBR_M;
break;
+ default:
+ return ICE_ERR_PARAM;
}
val |= rd32(hw, GLGEN_RTRIG);
@@ -904,7 +1143,22 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* @timeout: the maximum time in ms that the driver may hold the resource
* @cd: pointer to command details structure or NULL
*
- * requests common resource using the admin queue commands (0x0008)
+ * Requests common resource using the admin queue commands (0x0008).
+ * When attempting to acquire the Global Config Lock, the driver can
+ * learn of three states:
+ * 1) ICE_SUCCESS - acquired lock, and can perform download package
+ * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
+ * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
+ * successfully downloaded the package; the driver does
+ * not have to download the package and can continue
+ * loading
+ *
+ * Note that if the caller is in an acquire lock, perform action, release lock
+ * phase of operation, it is possible that the FW may detect a timeout and issue
+ * a CORER. In this case, the driver will receive a CORER interrupt and will
+ * have to determine its cause. The calling thread that is handling this flow
+ * will likely get an error propagated back to it indicating the Download
+ * Package, Update Package or the Release Resource AQ commands timed out.
*/
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
@@ -922,13 +1176,43 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
cmd_resp->res_id = cpu_to_le16(res);
cmd_resp->access_type = cpu_to_le16(access);
cmd_resp->res_number = cpu_to_le32(sdp_number);
+ cmd_resp->timeout = cpu_to_le32(*timeout);
+ *timeout = 0;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
/* The completion specifies the maximum time in ms that the driver
* may hold the resource in the Timeout field.
- * If the resource is held by someone else, the command completes with
- * busy return value and the timeout field indicates the maximum time
- * the current owner of the resource has to free it.
+ */
+
+ /* Global config lock response utilizes an additional status field.
+ *
+ * If the Global config lock resource is held by some other driver, the
+ * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
+ * and the timeout field indicates the maximum time the current owner
+ * of the resource has to free it.
+ */
+ if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
+ if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+ return 0;
+ } else if (le16_to_cpu(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_IN_PROG) {
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+ return ICE_ERR_AQ_ERROR;
+ } else if (le16_to_cpu(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_DONE) {
+ return ICE_ERR_AQ_NO_WORK;
+ }
+
+ /* invalid FW response, force a timeout immediately */
+ *timeout = 0;
+ return ICE_ERR_AQ_ERROR;
+ }
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
*/
if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
@@ -967,30 +1251,28 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
* @hw: pointer to the HW structure
* @res: resource id
* @access: access type (read or write)
+ * @timeout: timeout in milliseconds
*
* This function will attempt to acquire the ownership of a resource.
*/
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
- enum ice_aq_res_access_type access)
+ enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
+ u32 time_left = timeout;
enum ice_status status;
- u32 time_left = 0;
- u32 timeout;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- /* An admin queue return code of ICE_AQ_RC_EEXIST means that another
- * driver has previously acquired the resource and performed any
- * necessary updates; in this case the caller does not obtain the
- * resource and has no further work to do.
+ /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
*/
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
- status = ICE_ERR_AQ_NO_WORK;
+ if (status == ICE_ERR_AQ_NO_WORK)
goto ice_acquire_res_exit;
- }
if (status)
ice_debug(hw, ICE_DBG_RES,
@@ -1003,11 +1285,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
timeout = (timeout > delay) ? timeout - delay : 0;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
+ if (status == ICE_ERR_AQ_NO_WORK)
/* lock free, but no work to do */
- status = ICE_ERR_AQ_NO_WORK;
break;
- }
if (!status)
/* lock acquired */
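
A hypothetical caller-side sketch of the three Global Config Lock outcomes documented for 0x0008 above; ICE_RES_WRITE is the driver's write access type, and the timeout value here is made up:

static enum ice_status ice_example_download_pkg(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
				 ICE_RES_WRITE, 3000 /* ms, assumed */);
	if (status == ICE_ERR_AQ_NO_WORK)
		return 0;	/* another PF already downloaded the package */
	if (status)
		return status;	/* lock not acquired; fail the load */

	/* lock held: issue Download/Update Package AQ commands here */

	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
	return 0;
}
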
@@ -1307,6 +1587,110 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
}
/**
+ * ice_get_link_speed_based_on_phy_type - returns link speed
+ * @phy_type_low: lower part of phy_type
+ *
+ * This helper function will convert a phy_type_low to its corresponding link
+ * speed.
+ * Note: phy_type_low should have exactly one bit set, as this function
+ * converts a single PHY type to its speed.
+ * If no bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
+ * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
+ */
+static u16
+ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+{
+ u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ switch (phy_type_low) {
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
+ break;
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
+ break;
+ default:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed_phy_type_low;
+}
+
+/**
+ * ice_update_phy_type
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @link_speeds_bitmap: targeted link speeds bitmap
+ *
+ * Note: For the format of link_speeds_bitmap, see
+ * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
+ * link_speeds_bitmap that includes multiple speeds.
+ *
+ * Each bit of phy_type_low represents a certain link speed. This helper
+ * function turns on the bits in phy_type_low that correspond to the speeds
+ * set in the link_speeds_bitmap input parameter.
+ */
+void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
+{
+ u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+ u64 pt_low;
+ int index;
+
+ /* We first check with low part of phy_type */
+ for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
+ pt_low = BIT_ULL(index);
+ speed = ice_get_link_speed_based_on_phy_type(pt_low);
+
+ if (link_speeds_bitmap & speed)
+ *phy_type_low |= BIT_ULL(index);
+ }
+}
+
+/**
* ice_aq_set_phy_cfg
* @hw: pointer to the hw struct
* @lport: logical port number
@@ -1318,19 +1702,18 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* mode as the PF may not have the privilege to set some of the PHY Config
* parameters. This status will be indicated by the command response (0x0601).
*/
-static enum ice_status
+enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
- struct ice_aqc_set_phy_cfg *cmd;
struct ice_aq_desc desc;
if (!cfg)
return ICE_ERR_PARAM;
- cmd = &desc.params.set_phy;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
- cmd->lport_num = lport;
+ desc.params.set_phy.lport_num = lport;
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}
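
Taken together, ice_update_phy_type() and the now-exported ice_aq_set_phy_cfg() let a caller apply a requested speed set. A minimal sketch, assuming pi->hw and pi->lport are valid (the helper name is hypothetical, and the remaining ice_aqc_set_phy_cfg_data fields are left at their defaults for brevity):

static enum ice_status
ice_example_apply_speeds(struct ice_port_info *pi, u16 speeds)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	u64 phy_type_low = 0;

	/* translate the ICE_AQ_LINK_SPEED_* bitmap into PHY type bits */
	ice_update_phy_type(&phy_type_low, speeds);

	cfg.phy_type_low = cpu_to_le64(phy_type_low);
	cfg.caps |= ICE_AQ_PHY_ENA_LINK | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	return ice_aq_set_phy_cfg(pi->hw, pi->lport, &cfg, NULL);
}
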
@@ -1379,12 +1762,12 @@ out:
* ice_set_fc
* @pi: port information structure
* @aq_failures: pointer to status code, specific to ice_set_fc routine
- * @atomic_restart: enable automatic link update
+ * @ena_auto_link_update: enable automatic link update
*
* Set the requested flow control mode.
*/
enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_aqc_get_phy_caps_data *pcaps;
@@ -1434,8 +1817,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
int retry_count, retry_max = 10;
/* Auto restart link so settings take effect */
- if (atomic_restart)
- cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
+ if (ena_auto_link_update)
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
/* Copy over all the old settings */
cfg.phy_type_low = pcaps->phy_type_low;
cfg.low_power_ctrl = pcaps->low_power_ctrl;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 9a5519130af1..aac2d6cadaaf 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -23,7 +23,7 @@ enum ice_status
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
- enum ice_aq_res_access_type access);
+ enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
@@ -58,12 +58,24 @@ enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
+ struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_sq_cd *cd);
+void
+ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap);
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart);
+ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
+ bool ena_auto_link_update);
+
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
@@ -83,4 +95,5 @@ enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
+void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 62be72fdc8f3..1fe026a65d75 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -806,6 +806,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
u16 retval = 0;
u32 val = 0;
+ /* if reset is in progress return a soft error */
+ if (hw->reset_ongoing)
+ return ICE_ERR_RESET_ONGOING;
mutex_lock(&cq->sq_lock);
cq->sq_last_status = ICE_AQ_RC_OK;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index c71a9b528d6d..db2c502ae932 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -332,58 +332,473 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
}
-static int
-ice_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
+/**
+ * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @netdev: network interface device structure
+ * @ks: ethtool link ksettings struct to fill out
+ */
+static void ice_phy_type_to_ethtool(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
- bool link_up;
+ u64 phy_types_low;
hw_link_info = &vsi->port_info->phy.link_info;
- link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+ phy_types_low = vsi->port_info->phy.phy_type_low;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseX_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseX_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseX_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseSR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseLR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseSR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseKR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseKR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
+ }
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseT_Full);
+ /* Autoneg PHY types */
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
+ }
+}
- /* set speed and duplex */
- if (link_up) {
- switch (hw_link_info->link_speed) {
- case ICE_AQ_LINK_SPEED_100MB:
- ks->base.speed = SPEED_100;
- break;
- case ICE_AQ_LINK_SPEED_2500MB:
- ks->base.speed = SPEED_2500;
- break;
- case ICE_AQ_LINK_SPEED_5GB:
- ks->base.speed = SPEED_5000;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- ks->base.speed = SPEED_10000;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- ks->base.speed = SPEED_25000;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- ks->base.speed = SPEED_40000;
- break;
- default:
- ks->base.speed = SPEED_UNKNOWN;
- break;
- }
+#define TEST_SET_BITS_TIMEOUT 50
+#define TEST_SET_BITS_SLEEP_MAX 2000
+#define TEST_SET_BITS_SLEEP_MIN 1000
- ks->base.duplex = DUPLEX_FULL;
- } else {
- ks->base.speed = SPEED_UNKNOWN;
- ks->base.duplex = DUPLEX_UNKNOWN;
+/**
+ * ice_get_settings_link_up - Get Link settings for when link is up
+ * @ks: ethtool ksettings to fill in
+ * @netdev: network interface device structure
+ */
+static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
+ struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ethtool_link_ksettings cap_ksettings;
+ struct ice_link_status *link_info;
+ struct ice_vsi *vsi = np->vsi;
+ bool unrecog_phy_low = false;
+
+ link_info = &vsi->port_info->phy.link_info;
+
+ /* Initialize supported and advertised settings based on phy settings */
+ switch (link_info->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseKR4_Full);
+ break;
+ default:
+ unrecog_phy_low = true;
+ }
+
+ if (unrecog_phy_low) {
+ /* if we got here and link is up something bad is afoot */
+ netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n",
+ (u64)link_info->phy_type_low);
}
+ /* Now that we've worked out everything that could be supported by the
+ * current PHY type, get what is supported by the NVM and intersect
+ * them to get what is truly supported
+ */
+ memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
+ ice_phy_type_to_ethtool(netdev, &cap_ksettings);
+ ethtool_intersect_link_masks(ks, &cap_ksettings);
+
+ switch (link_info->link_speed) {
+ case ICE_AQ_LINK_SPEED_40GB:
+ ks->base.speed = SPEED_40000;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ ks->base.speed = SPEED_25000;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ ks->base.speed = SPEED_20000;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ ks->base.speed = SPEED_10000;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ ks->base.speed = SPEED_5000;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ ks->base.speed = SPEED_2500;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ ks->base.speed = SPEED_1000;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ ks->base.speed = SPEED_100;
+ break;
+ default:
+ netdev_info(netdev,
+ "WARNING: Unrecognized link_speed (0x%x).\n",
+ link_info->link_speed);
+ break;
+ }
+ ks->base.duplex = DUPLEX_FULL;
+}
+
+/**
+ * ice_get_settings_link_down - Get the Link settings when link is down
+ * @ks: ethtool ksettings to fill in
+ * @netdev: network interface device structure
+ *
+ * Reports link settings that can be determined when link is down
+ */
+static void
+ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
+ struct net_device *netdev)
+{
+ /* link is down and the driver needs to fall back on
+ * supported phy types to figure out what info to display
+ */
+ ice_phy_type_to_ethtool(netdev, ks);
+
+ /* With no link, speed and duplex are unknown */
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
+}
+
+/**
+ * ice_get_link_ksettings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ks: ethtool ksettings
+ *
+ * Reports speed/duplex settings based on media_type
+ */
+static int ice_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_link_status *hw_link_info;
+ struct ice_vsi *vsi = np->vsi;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ hw_link_info = &vsi->port_info->phy.link_info;
+
+ /* set speed and duplex */
+ if (hw_link_info->link_info & ICE_AQ_LINK_UP)
+ ice_get_settings_link_up(ks, netdev);
+ else
+ ice_get_settings_link_down(ks, netdev);
+
/* set autoneg settings */
- ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
/* set media type settings */
switch (vsi->port_info->phy.media_type) {
@@ -442,6 +857,311 @@ ice_get_link_ksettings(struct net_device *netdev,
}
/**
+ * ice_ksettings_find_adv_link_speed - Find advertising link speed
+ * @ks: ethtool ksettings
+ */
+static u16
+ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
+{
+ u16 adv_link_speed = 0;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100baseT_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseX_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 5000baseT_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKR_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseSR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseLR_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseCR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseSR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseKR_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_25GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseCR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseSR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseLR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseKR4_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
+
+ return adv_link_speed;
+}
+
+/**
+ * ice_setup_autoneg
+ * @p: port info
+ * @ks: ethtool_link_ksettings
+ * @config: configuration that will be sent down to FW
+ * @autoneg_enabled: whether autonegotiation is enabled
+ * @autoneg_changed: set to 1 if the autonegotiation setting will change
+ * @netdev: network interface device structure
+ *
+ * Setup PHY autonegotiation feature
+ */
+static int
+ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
+ struct ice_aqc_set_phy_cfg_data *config,
+ u8 autoneg_enabled, u8 *autoneg_changed,
+ struct net_device *netdev)
+{
+ int err = 0;
+
+ *autoneg_changed = 0;
+
+ /* Check autoneg */
+ if (autoneg_enabled == AUTONEG_ENABLE) {
+ /* If autoneg was not already enabled */
+ if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) {
+ /* If autoneg is not supported, return error */
+ if (!ethtool_link_ksettings_test_link_mode(ks,
+ supported,
+ Autoneg)) {
+ netdev_info(netdev, "Autoneg not supported on this phy.\n");
+ err = -EINVAL;
+ } else {
+ /* Autoneg is allowed to change */
+ config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ *autoneg_changed = 1;
+ }
+ }
+ } else {
+ /* If autoneg is currently enabled */
+ if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
+ /* If autoneg is supported, 10GBASE_T is the only PHY
+ * that can disable it, so otherwise return an error
+ */
+ if (ethtool_link_ksettings_test_link_mode(ks,
+ supported,
+ Autoneg)) {
+ netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+ err = -EINVAL;
+ } else {
+ /* Autoneg is allowed to change */
+ config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ *autoneg_changed = 1;
+ }
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ice_set_link_ksettings - Set Speed and Duplex
+ * @netdev: network interface device structure
+ * @ks: ethtool ksettings
+ *
+ * Set speed/duplex per media_types advertised/forced
+ */
+static int ice_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ethtool_link_ksettings safe_ks, copy_ks;
+ struct ice_aqc_get_phy_caps_data *abilities;
+ u16 adv_link_speed, curr_link_speed, idx;
+ struct ice_aqc_set_phy_cfg_data config;
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_port_info *p;
+ u8 autoneg_changed = 0;
+ enum ice_status status;
+ u64 phy_type_low = 0;
+ int err = 0;
+ bool linkup;
+
+ p = np->vsi->port_info;
+
+ if (!p)
+ return -EOPNOTSUPP;
+
+ /* Check if this is the LAN VSI */
+ for (idx = 0; idx < pf->num_alloc_vsi; idx++) {
+ if (pf->vsi[idx]->type == ICE_VSI_PF) {
+ if (np->vsi != pf->vsi[idx])
+ return -EOPNOTSUPP;
+ break;
+ }
+ }
+
+ if (p->phy.media_type != ICE_MEDIA_BASET &&
+ p->phy.media_type != ICE_MEDIA_FIBER &&
+ p->phy.media_type != ICE_MEDIA_BACKPLANE &&
+ p->phy.media_type != ICE_MEDIA_DA &&
+ p->phy.link_info.link_info & ICE_AQ_LINK_UP)
+ return -EOPNOTSUPP;
+
+ /* copy the ksettings to copy_ks to avoid modifying the original */
+ memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));
+
+ /* save autoneg out of ksettings */
+ autoneg = copy_ks.base.autoneg;
+
+ memset(&safe_ks, 0, sizeof(safe_ks));
+
+	/* Get link modes supported by hardware. */
+ ice_phy_type_to_ethtool(netdev, &safe_ks);
+
+ /* and check against modes requested by user.
+	 * Return an error if an unsupported mode was requested.
+ */
+ if (!bitmap_subset(copy_ks.link_modes.advertising,
+ safe_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ /* get our own copy of the bits to check against */
+ memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
+ safe_ks.base.cmd = copy_ks.base.cmd;
+ safe_ks.base.link_mode_masks_nwords =
+ copy_ks.base.link_mode_masks_nwords;
+ ice_get_link_ksettings(netdev, &safe_ks);
+
+ /* set autoneg back to what it currently is */
+ copy_ks.base.autoneg = safe_ks.base.autoneg;
+ /* we don't compare the speed */
+ copy_ks.base.speed = safe_ks.base.speed;
+
+ /* If copy_ks.base and safe_ks.base are not the same now, then they are
+ * trying to set something that we do not support.
+ */
+ if (memcmp(&copy_ks.base, &safe_ks.base,
+ sizeof(struct ethtool_link_settings)))
+ return -EOPNOTSUPP;
+
+ while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
+ }
+
+ abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities),
+ GFP_KERNEL);
+	if (!abilities) {
+		/* don't leave __ICE_CFG_BUSY set on the error path */
+		clear_bit(__ICE_CFG_BUSY, pf->state);
+		return -ENOMEM;
+	}
+
+ /* Get the current phy config */
+ status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* Copy abilities to config in case autoneg is not set below */
+ memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data));
+ config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE;
+ if (abilities->caps & ICE_AQC_PHY_AN_MODE)
+ config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ /* Check autoneg */
+ err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed,
+ netdev);
+
+ if (err)
+ goto done;
+
+ /* Call to get the current link speed */
+ p->phy.get_link_info = true;
+ status = ice_get_link_status(p, &linkup);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ curr_link_speed = p->phy.link_info.link_speed;
+ adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
+
+ /* If speed didn't get set, set it to what it currently is.
+ * This is needed because if advertise is 0 (as it is when autoneg
+ * is disabled) then speed won't get set.
+ */
+ if (!adv_link_speed)
+ adv_link_speed = curr_link_speed;
+
+	/* Convert the advertised link speeds to their corresponding PHY_TYPE */
+ ice_update_phy_type(&phy_type_low, adv_link_speed);
+
+ if (!autoneg_changed && adv_link_speed == curr_link_speed) {
+ netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
+ goto done;
+ }
+
+ /* copy over the rest of the abilities */
+ config.low_power_ctrl = abilities->low_power_ctrl;
+ config.eee_cap = abilities->eee_cap;
+ config.eeer_value = abilities->eeer_value;
+ config.link_fec_opt = abilities->link_fec_options;
+
+ /* save the requested speeds */
+ p->phy.link_info.req_speeds = adv_link_speed;
+
+ /* set link and auto negotiation so changes take effect */
+ config.caps |= ICE_AQ_PHY_ENA_LINK;
+
+ if (phy_type_low) {
+ config.phy_type_low = cpu_to_le64(phy_type_low) &
+ abilities->phy_type_low;
+ } else {
+ err = -EAGAIN;
+		netdev_info(netdev, "Nothing changed. No PHY_TYPE corresponds to the advertised link speed.\n");
+ goto done;
+ }
+
+ /* If link is up put link down */
+ if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) {
+ /* Tell the OS link is going down, the link will go
+ * back up when fw says it is ready asynchronously
+ */
+ ice_print_link_msg(np->vsi, false);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+
+ /* make the aq call */
+ status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL);
+ if (status) {
+		netdev_info(netdev, "Set phy config failed\n");
+ err = -EAGAIN;
+ }
+
+done:
+ devm_kfree(&pf->pdev->dev, abilities);
+ clear_bit(__ICE_CFG_BUSY, pf->state);
+
+ return err;
+}
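
One detail of the function above worth isolating is the __ICE_CFG_BUSY guard: a bounded retry loop on an atomic bit instead of a mutex, returning -EBUSY if the bit never clears. A simplified single-threaded sketch of the shape of that loop (the stand-in test_and_set() here is not atomic, unlike the kernel's test_and_set_bit()):

#include <assert.h>

static int busy;	/* stand-in for the __ICE_CFG_BUSY bit in pf->state */

static int test_and_set(void)
{
	int old = busy;

	busy = 1;
	return old;
}

int main(void)
{
	int timeout = 10;	/* stand-in for TEST_SET_BITS_TIMEOUT */

	while (test_and_set()) {
		if (!--timeout)
			return -1;	/* -EBUSY after bounded retries */
		/* the driver sleeps here via usleep_range() */
	}
	/* ... critical section: PHY reconfiguration ... */
	busy = 0;	/* clear_bit() runs on every exit path */
	assert(!busy);
	return 0;
}
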
+
+/**
* ice_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -933,6 +1653,7 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
+ .set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6076fc87df9d..88f11498804b 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,251 +6,249 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
-#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
-#define PF_FW_ARQBAH 0x00080180
-#define PF_FW_ARQBAL 0x00080080
-#define PF_FW_ARQH 0x00080380
-#define PF_FW_ARQH_ARQH_S 0
-#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, PF_FW_ARQH_ARQH_S)
-#define PF_FW_ARQLEN 0x00080280
-#define PF_FW_ARQLEN_ARQLEN_S 0
-#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S)
-#define PF_FW_ARQLEN_ARQVFE_S 28
-#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S)
-#define PF_FW_ARQLEN_ARQOVFL_S 29
-#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S)
-#define PF_FW_ARQLEN_ARQCRIT_S 30
-#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S)
-#define PF_FW_ARQLEN_ARQENABLE_S 31
-#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S)
-#define PF_FW_ARQT 0x00080480
-#define PF_FW_ATQBAH 0x00080100
-#define PF_FW_ATQBAL 0x00080000
-#define PF_FW_ATQH 0x00080300
-#define PF_FW_ATQH_ATQH_S 0
-#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, PF_FW_ATQH_ATQH_S)
-#define PF_FW_ATQLEN 0x00080200
-#define PF_FW_ATQLEN_ATQLEN_S 0
-#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
-#define PF_FW_ATQLEN_ATQVFE_S 28
-#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S)
-#define PF_FW_ATQLEN_ATQOVFL_S 29
-#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S)
-#define PF_FW_ATQLEN_ATQCRIT_S 30
-#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S)
-#define PF_FW_ATQLEN_ATQENABLE_S 31
-#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
-#define PF_FW_ATQT 0x00080400
-
+#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
+#define PF_FW_ARQBAH 0x00080180
+#define PF_FW_ARQBAL 0x00080080
+#define PF_FW_ARQH 0x00080380
+#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, 0)
+#define PF_FW_ARQLEN 0x00080280
+#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_FW_ARQLEN_ARQVFE_M BIT(28)
+#define PF_FW_ARQLEN_ARQOVFL_M BIT(29)
+#define PF_FW_ARQLEN_ARQCRIT_M BIT(30)
+#define PF_FW_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_FW_ARQT 0x00080480
+#define PF_FW_ATQBAH 0x00080100
+#define PF_FW_ATQBAL 0x00080000
+#define PF_FW_ATQH 0x00080300
+#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, 0)
+#define PF_FW_ATQLEN 0x00080200
+#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_FW_ATQLEN_ATQVFE_M BIT(28)
+#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
+#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
+#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_FW_ATQT 0x00080400
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, 30)
#define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, 30)
#define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, 30)
#define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S)
-
-#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4))
-#define QRXFLXP_CNTXT_RXDID_IDX_S 0
-#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S)
-#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
-#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S)
-#define QRXFLXP_CNTXT_TS_S 11
-#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S)
-#define GLGEN_RSTAT 0x000B8188
-#define GLGEN_RSTAT_DEVSTATE_S 0
-#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S)
-#define GLGEN_RSTCTL 0x000B8180
-#define GLGEN_RSTCTL_GRSTDEL_S 0
-#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
-#define GLGEN_RSTAT_RESET_TYPE_S 2
-#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S)
-#define GLGEN_RTRIG 0x000B8190
-#define GLGEN_RTRIG_CORER_S 0
-#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S)
-#define GLGEN_RTRIG_GLOBR_S 1
-#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S)
-#define GLGEN_STAT 0x000B612C
-#define PFGEN_CTRL 0x00091000
-#define PFGEN_CTRL_PFSWR_S 0
-#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S)
-#define PFGEN_STATE 0x00088000
-#define PRTGEN_STATUS 0x000B8100
-#define PFHMC_ERRORDATA 0x00520500
-#define PFHMC_ERRORINFO 0x00520400
-#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
-#define GLINT_DYN_CTL_INTENA_S 0
-#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S)
-#define GLINT_DYN_CTL_CLEARPBA_S 1
-#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S)
-#define GLINT_DYN_CTL_SWINT_TRIG_S 2
-#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S)
-#define GLINT_DYN_CTL_ITR_INDX_S 3
-#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
-#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S)
-#define GLINT_DYN_CTL_INTENA_MSK_S 31
-#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S)
-#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
-#define PFINT_FW_CTL 0x0016C800
-#define PFINT_FW_CTL_MSIX_INDX_S 0
-#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S)
-#define PFINT_FW_CTL_ITR_INDX_S 11
-#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S)
-#define PFINT_FW_CTL_CAUSE_ENA_S 30
-#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
-#define PFINT_OICR 0x0016CA00
-#define PFINT_OICR_ECC_ERR_S 16
-#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
-#define PFINT_OICR_MAL_DETECT_S 19
-#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S)
-#define PFINT_OICR_GRST_S 20
-#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
-#define PFINT_OICR_PCI_EXCEPTION_S 21
-#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_HMC_ERR_S 26
-#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
-#define PFINT_OICR_PE_CRITERR_S 28
-#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S)
-#define PFINT_OICR_CTL 0x0016CA80
-#define PFINT_OICR_CTL_MSIX_INDX_S 0
-#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S)
-#define PFINT_OICR_CTL_ITR_INDX_S 11
-#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S)
-#define PFINT_OICR_CTL_CAUSE_ENA_S 30
-#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S)
-#define PFINT_OICR_ENA 0x0016C900
-#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
-#define QINT_RQCTL_MSIX_INDX_S 0
-#define QINT_RQCTL_ITR_INDX_S 11
-#define QINT_RQCTL_CAUSE_ENA_S 30
-#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S)
-#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
-#define QINT_TQCTL_MSIX_INDX_S 0
-#define QINT_TQCTL_ITR_INDX_S 11
-#define QINT_TQCTL_CAUSE_ENA_S 30
-#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S)
-#define GLLAN_RCTL_0 0x002941F8
-#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
-#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
-#define QRX_CTRL_MAX_INDEX 2047
-#define QRX_CTRL_QENA_REQ_S 0
-#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S)
-#define QRX_CTRL_QENA_STAT_S 2
-#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S)
-#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4))
-#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4))
-#define GLNVM_FLA 0x000B6108
-#define GLNVM_FLA_LOCKED_S 6
-#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S)
-#define GLNVM_GENS 0x000B6100
-#define GLNVM_GENS_SR_SIZE_S 5
-#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S)
-#define GLNVM_ULD 0x000B6008
-#define GLNVM_ULD_CORER_DONE_S 3
-#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S)
-#define GLNVM_ULD_GLOBR_DONE_S 4
-#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S)
-#define PF_FUNC_RID 0x0009E880
-#define PF_FUNC_RID_FUNC_NUM_S 0
-#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S)
-#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
-#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
-#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
-#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
-#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
-#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
-#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
-#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
-#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
-#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
-#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
-#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8))
-#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
-#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
-#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
-#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
-#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
-#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
-#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
-#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
-#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
-#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
-#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
-#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
-#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
-#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
-#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
-#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
-#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
-#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
-#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
-#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
-#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
-#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
-#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
-#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
-#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
-#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
-#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
-#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
-#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
-#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
-#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
-#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
-#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
-#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
-#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
-#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
-#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
-#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
-#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
-#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
-#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
-#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
-#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
-#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
-#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
-#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
-#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
-#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
-#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
-#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
-#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
-#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
-#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
-#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
-#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
-#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
-#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
-#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
-#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
-#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
-#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
-#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
-#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
-#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
-#define VSIQF_HKEY_MAX_INDEX 12
+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, 30)
+#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4))
+#define QRXFLXP_CNTXT_RXDID_IDX_S 0
+#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0)
+#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
+#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
+#define GLGEN_RSTAT 0x000B8188
+#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
+#define GLGEN_RSTCTL 0x000B8180
+#define GLGEN_RSTCTL_GRSTDEL_S 0
+#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
+#define GLGEN_RSTAT_RESET_TYPE_S 2
+#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, 2)
+#define GLGEN_RTRIG 0x000B8190
+#define GLGEN_RTRIG_CORER_M BIT(0)
+#define GLGEN_RTRIG_GLOBR_M BIT(1)
+#define GLGEN_STAT 0x000B612C
+#define PFGEN_CTRL 0x00091000
+#define PFGEN_CTRL_PFSWR_M BIT(0)
+#define PFGEN_STATE 0x00088000
+#define PRTGEN_STATUS 0x000B8100
+#define PFHMC_ERRORDATA 0x00520500
+#define PFHMC_ERRORINFO 0x00520400
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
+#define GLINT_DYN_CTL_INTENA_M BIT(0)
+#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
+#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
+#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
+#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
+#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
+#define PFINT_FW_CTL 0x0016C800
+#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_FW_CTL_ITR_INDX_S 11
+#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_OICR 0x0016CA00
+#define PFINT_OICR_ECC_ERR_M BIT(16)
+#define PFINT_OICR_MAL_DETECT_M BIT(19)
+#define PFINT_OICR_GRST_M BIT(20)
+#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
+#define PFINT_OICR_HMC_ERR_M BIT(26)
+#define PFINT_OICR_PE_CRITERR_M BIT(28)
+#define PFINT_OICR_CTL 0x0016CA80
+#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_OICR_CTL_ITR_INDX_S 11
+#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_OICR_ENA 0x0016C900
+#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
+#define QINT_RQCTL_MSIX_INDX_S 0
+#define QINT_RQCTL_ITR_INDX_S 11
+#define QINT_RQCTL_CAUSE_ENA_M BIT(30)
+#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
+#define QINT_TQCTL_MSIX_INDX_S 0
+#define QINT_TQCTL_ITR_INDX_S 11
+#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
+#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
+#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
+#define QRX_CTRL_MAX_INDEX 2047
+#define QRX_CTRL_QENA_REQ_S 0
+#define QRX_CTRL_QENA_REQ_M BIT(0)
+#define QRX_CTRL_QENA_STAT_S 2
+#define QRX_CTRL_QENA_STAT_M BIT(2)
+#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4))
+#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4))
+#define QRX_TAIL_MAX_INDEX 2047
+#define QRX_TAIL_TAIL_S 0
+#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0)
+#define GL_MDET_RX 0x00294C00
+#define GL_MDET_RX_QNUM_S 0
+#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
+#define GL_MDET_RX_VF_NUM_S 15
+#define GL_MDET_RX_VF_NUM_M ICE_M(0xFF, 15)
+#define GL_MDET_RX_PF_NUM_S 23
+#define GL_MDET_RX_PF_NUM_M ICE_M(0x7, 23)
+#define GL_MDET_RX_MAL_TYPE_S 26
+#define GL_MDET_RX_MAL_TYPE_M ICE_M(0x1F, 26)
+#define GL_MDET_RX_VALID_M BIT(31)
+#define GL_MDET_TX_PQM 0x002D2E00
+#define GL_MDET_TX_PQM_PF_NUM_S 0
+#define GL_MDET_TX_PQM_PF_NUM_M ICE_M(0x7, 0)
+#define GL_MDET_TX_PQM_VF_NUM_S 4
+#define GL_MDET_TX_PQM_VF_NUM_M ICE_M(0xFF, 4)
+#define GL_MDET_TX_PQM_QNUM_S 12
+#define GL_MDET_TX_PQM_QNUM_M ICE_M(0x3FFF, 12)
+#define GL_MDET_TX_PQM_MAL_TYPE_S 26
+#define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26)
+#define GL_MDET_TX_PQM_VALID_M BIT(31)
+#define GL_MDET_TX_TCLAN 0x000FC068
+#define GL_MDET_TX_TCLAN_QNUM_S 0
+#define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0)
+#define GL_MDET_TX_TCLAN_VF_NUM_S 15
+#define GL_MDET_TX_TCLAN_VF_NUM_M ICE_M(0xFF, 15)
+#define GL_MDET_TX_TCLAN_PF_NUM_S 23
+#define GL_MDET_TX_TCLAN_PF_NUM_M ICE_M(0x7, 23)
+#define GL_MDET_TX_TCLAN_MAL_TYPE_S 26
+#define GL_MDET_TX_TCLAN_MAL_TYPE_M ICE_M(0x1F, 26)
+#define GL_MDET_TX_TCLAN_VALID_M BIT(31)
+#define PF_MDET_RX 0x00294280
+#define PF_MDET_RX_VALID_M BIT(0)
+#define PF_MDET_TX_PQM 0x002D2C80
+#define PF_MDET_TX_PQM_VALID_M BIT(0)
+#define PF_MDET_TX_TCLAN 0x000FC000
+#define PF_MDET_TX_TCLAN_VALID_M BIT(0)
+#define GLNVM_FLA 0x000B6108
+#define GLNVM_FLA_LOCKED_M BIT(6)
+#define GLNVM_GENS 0x000B6100
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
+#define GLNVM_ULD 0x000B6008
+#define GLNVM_ULD_CORER_DONE_M BIT(3)
+#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define PF_FUNC_RID 0x0009E880
+#define PF_FUNC_RID_FUNC_NUM_S 0
+#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
+#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
+#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
+#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
+#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
+#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
+#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
+#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
+#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
+#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
+#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
+#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
+#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8))
+#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
+#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
+#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
+#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
+#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
+#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
+#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
+#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
+#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
+#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
+#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
+#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
+#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
+#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
+#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
+#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
+#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
+#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
+#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
+#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
+#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
+#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
+#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
+#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
+#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
+#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
+#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
+#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
+#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
+#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
+#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
+#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
+#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
+#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
+#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
+#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
+#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
+#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
+#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
+#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
+#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
+#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
+#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
+#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
+#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
+#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
+#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
+#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
+#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
+#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
+#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
+#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
+#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
+#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
+#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
+#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
+#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
+#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
+#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
+#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
+#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
+#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
+#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
+#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define VSIQF_HKEY_MAX_INDEX 12
#endif /* _ICE_HW_AUTOGEN_H_ */
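
The conversion above folds most *_S shift macros into literal shift arguments of ICE_M(), which ice_type.h defines as ((m) << (s)). Extracting a field is the usual mask-then-shift; a standalone example using a locally re-declared copy of the GLGEN_RSTAT reset-type field (the register value is hypothetical):

#include <assert.h>
#include <stdint.h>

#define ICE_M(m, s)	((m) << (s))	/* as in ice_type.h */
#define RESET_TYPE_S	2		/* mirrors GLGEN_RSTAT_RESET_TYPE_S */
#define RESET_TYPE_M	ICE_M(0x3, RESET_TYPE_S)

int main(void)
{
	uint32_t rstat = 0x0000000c;	/* hypothetical GLGEN_RSTAT readout */
	uint32_t reset_type = (rstat & RESET_TYPE_M) >> RESET_TYPE_S;

	assert(reset_type == 0x3);	/* bits 3:2 isolated and shifted down */
	return 0;
}
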
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 068dbc740b76..94504023d86e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -188,23 +188,25 @@ struct ice_32b_rx_flex_desc_nic {
* with a specific metadata (profile 7 reserved for HW)
*/
enum ice_rxdid {
- ICE_RXDID_START = 0,
- ICE_RXDID_LEGACY_0 = ICE_RXDID_START,
- ICE_RXDID_LEGACY_1,
- ICE_RXDID_FLX_START,
- ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START,
- ICE_RXDID_FLX_LAST = 63,
- ICE_RXDID_LAST = ICE_RXDID_FLX_LAST
+ ICE_RXDID_LEGACY_0 = 0,
+ ICE_RXDID_LEGACY_1 = 1,
+ ICE_RXDID_FLEX_NIC = 2,
+ ICE_RXDID_FLEX_NIC_2 = 6,
+ ICE_RXDID_HW = 7,
+ ICE_RXDID_LAST = 63,
};
/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01
/* Receive Descriptor MDID values */
-#define ICE_RX_MDID_FLOW_ID_LOWER 5
-#define ICE_RX_MDID_FLOW_ID_HIGH 6
-#define ICE_RX_MDID_HASH_LOW 56
-#define ICE_RX_MDID_HASH_HIGH 57
+enum ice_flex_rx_mdid {
+ ICE_RX_MDID_FLOW_ID_LOWER = 5,
+ ICE_RX_MDID_FLOW_ID_HIGH,
+ ICE_RX_MDID_SRC_VSI = 19,
+ ICE_RX_MDID_HASH_LOW = 56,
+ ICE_RX_MDID_HASH_HIGH,
+};
/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f1e80eed2fd6..1b49a605d094 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7,7 +7,7 @@
#include "ice.h"
-#define DRV_VERSION "ice-0.7.0-k"
+#define DRV_VERSION "0.7.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -32,10 +32,86 @@ static const struct net_device_ops ice_netdev_ops;
static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static int ice_vsi_release(struct ice_vsi *vsi);
+static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);
/**
+ * ice_get_tx_pending - returns number of Tx descriptors not processed
+ * @ring: the ring of descriptors
+ */
+static u32 ice_get_tx_pending(struct ice_ring *ring)
+{
+ u32 head, tail;
+
+ head = ring->next_to_clean;
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+ return 0;
+}
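
The wrap branch deserves a worked number: on a 512-entry ring with next_to_clean at 500 and tail at 10, head > tail, so the pending count is tail + count - head = 10 + 512 - 500 = 22. A standalone check with those hypothetical values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical ring state, not read from hardware */
	uint32_t head = 500, tail = 10, count = 512;
	uint32_t pending;

	pending = (head == tail) ? 0 :
		  (head < tail) ? tail - head
				: tail + count - head;

	assert(pending == 22);	/* the ring wrapped past index 511 to 0 */
	return 0;
}
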
+
+/**
+ * ice_check_for_hang_subtask - check for and recover hung queues
+ * @pf: pointer to PF struct
+ */
+static void ice_check_for_hang_subtask(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = NULL;
+ unsigned int i;
+ u32 v, v_idx;
+ int packets;
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
+ vsi = pf->vsi[v];
+ break;
+ }
+
+ if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
+ return;
+
+ for (i = 0; i < vsi->num_txq; i++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[i];
+
+ if (tx_ring && tx_ring->desc) {
+ int itr = ICE_ITR_NONE;
+
+ /* If packet counter has not changed the queue is
+ * likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.pkts & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt == packets) {
+ /* Trigger sw interrupt to revive the queue */
+ v_idx = tx_ring->q_vector->v_idx;
+ wr32(&vsi->back->hw,
+ GLINT_DYN_CTL(vsi->base_vector + v_idx),
+ (itr << GLINT_DYN_CTL_ITR_INDX_S) |
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_MSK_M);
+ continue;
+ }
+
+ /* Memory barrier between read of packet count and call
+ * to ice_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt =
+ ice_get_tx_pending(tx_ring) ? packets : -1;
+ }
+ }
+}
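
The prev_pkt bookkeeping above is effectively a two-pass stall detector: a pass that sees pending work arms the detector with the current packet count, and a later pass that sees the same count fires the software interrupt. A standalone simulation of two service-task passes (hypothetical counts):

#include <assert.h>
#include <limits.h>

static int prev_pkt = -1;	/* -1 means no pending work recorded */

/* returns 1 when the queue looks stalled and a SW irq should fire */
static int service_pass(int pkts, int pending)
{
	if (prev_pkt == (pkts & INT_MAX))
		return 1;	/* counter did not move: revive queue */
	prev_pkt = pending ? (pkts & INT_MAX) : -1;
	return 0;
}

int main(void)
{
	assert(service_pass(5, 3) == 0);	/* progress; detector armed */
	assert(service_pass(5, 3) == 1);	/* stuck at 5: fire SW irq  */
	return 0;
}
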
+
+/**
* ice_get_free_slot - get the next non-NULL location index in array
* @array: array to search
* @size: size of the array
@@ -274,6 +350,63 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * returns 0 if VSI is updated, negative otherwise
+ */
+static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_vsi_ctx *ctxt;
+ struct device *dev;
+ int status;
+
+ if (!vsi)
+ return -EINVAL;
+
+ dev = &vsi->back->pdev->dev;
+ ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ if (ena) {
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ } else {
+ ctxt->info.sec_flags &=
+ ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+ ctxt->vsi_num = vsi->vsi_num;
+ status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL);
+ if (status) {
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n",
+ ena ? "Ena" : "Dis", vsi->vsi_num, status,
+ vsi->back->hw.adminq.sq_last_status);
+ goto err_out;
+ }
+
+ vsi->info.sec_flags = ctxt->info.sec_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+ devm_kfree(dev, ctxt);
+ return 0;
+
+err_out:
+ devm_kfree(dev, ctxt);
+ return -EIO;
+}
+
+/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -456,23 +589,13 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- u32 v;
-
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num);
-
- dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* disable the VSIs and their queues that are not already DOWN */
- /* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */
ice_pf_dis_all_vsi(pf);
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- pf->vsi[v]->vsi_num = 0;
-
ice_shutdown_all_ctrlq(hw);
+
+ set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}
/**
@@ -490,26 +613,32 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
WARN_ON(in_interrupt());
/* PFR is a bit of a special case because it doesn't result in an OICR
- * interrupt. So for PFR, we prepare for reset, issue the reset and
- * rebuild sequentially.
+ * interrupt. Set pending bit here which otherwise gets set in the
+ * OICR handler.
*/
- if (reset_type == ICE_RESET_PFR) {
+ if (reset_type == ICE_RESET_PFR)
set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
- ice_prepare_for_reset(pf);
- }
+
+ ice_prepare_for_reset(pf);
/* trigger the reset */
if (ice_reset(hw, reset_type)) {
dev_err(dev, "reset %d failed\n", reset_type);
set_bit(__ICE_RESET_FAILED, pf->state);
clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
return;
}
+ /* PFR is a bit of a special case because it doesn't result in an OICR
+ * interrupt. So for PFR, rebuild after the reset and clear the reset-
+ * associated state bits.
+ */
if (reset_type == ICE_RESET_PFR) {
pf->pfr_count++;
ice_rebuild(pf);
clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}
}
@@ -519,48 +648,57 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
*/
static void ice_reset_subtask(struct ice_pf *pf)
{
- enum ice_reset_req reset_type;
-
- rtnl_lock();
+ enum ice_reset_req reset_type = ICE_RESET_INVAL;
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
- * OICR interrupt. The OICR handler (ice_misc_intr) determines what
- * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in
- * pf->state. So if reset/recovery is pending (as indicated by this bit)
- * we do a rebuild and return.
+ * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
+ * of reset is pending and sets bits in pf->state indicating the reset
+	 * type and __ICE_RESET_RECOVERY_PENDING. So, if the latter bit is set,
+	 * prepare for the pending reset if not already prepared (for resets
+	 * this PF initiated in software, __ICE_PREPARED_FOR_RESET is already
+	 * set; for resets initiated by firmware or by other PFs it is not, so
+	 * prepare now), then poll for reset done, rebuild and return.
*/
if (ice_is_reset_recovery_pending(pf->state)) {
clear_bit(__ICE_GLOBR_RECV, pf->state);
clear_bit(__ICE_CORER_RECV, pf->state);
- ice_prepare_for_reset(pf);
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+ ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */
- if (ice_check_reset(&pf->hw))
+ if (ice_check_reset(&pf->hw)) {
set_bit(__ICE_RESET_FAILED, pf->state);
- else
+ } else {
+ /* done with reset. start rebuild */
+ pf->hw.reset_ongoing = false;
ice_rebuild(pf);
- clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
- goto unlock;
+ /* clear bit to resume normal operations, but
+			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
+ */
+ clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+ }
+
+ return;
}
/* No pending resets to finish processing. Check for new resets */
+ if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
+ reset_type = ICE_RESET_PFR;
+ if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
+ reset_type = ICE_RESET_CORER;
if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
reset_type = ICE_RESET_GLOBR;
- else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
- reset_type = ICE_RESET_CORER;
- else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
- reset_type = ICE_RESET_PFR;
- else
- goto unlock;
+ /* If no valid reset type requested just return */
+ if (reset_type == ICE_RESET_INVAL)
+ return;
- /* reset if not already down or resetting */
+ /* reset if not already down or busy */
if (!test_bit(__ICE_DOWN, pf->state) &&
!test_bit(__ICE_CFG_BUSY, pf->state)) {
ice_do_reset(pf, reset_type);
}
-
-unlock:
- rtnl_unlock();
}
/**
@@ -903,6 +1041,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
dev_err(&pf->pdev->dev,
"Could not handle link event\n");
break;
+ case ice_aqc_opc_fw_logging:
+ ice_output_fw_log(hw, &event.desc, event.msg_buf);
+ break;
default:
dev_dbg(&pf->pdev->dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
@@ -966,8 +1107,9 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
*/
static void ice_service_task_schedule(struct ice_pf *pf)
{
- if (!test_bit(__ICE_DOWN, pf->state) &&
- !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state))
+ if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
+ !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
+ !test_bit(__ICE_NEEDS_RESTART, pf->state))
queue_work(ice_wq, &pf->serv_task);
}
@@ -985,6 +1127,22 @@ static void ice_service_task_complete(struct ice_pf *pf)
}
/**
+ * ice_service_task_stop - stop service task and cancel works
+ * @pf: board private structure
+ */
+static void ice_service_task_stop(struct ice_pf *pf)
+{
+ set_bit(__ICE_SERVICE_DIS, pf->state);
+
+ if (pf->serv_tmr.function)
+ del_timer_sync(&pf->serv_tmr);
+ if (pf->serv_task.func)
+ cancel_work_sync(&pf->serv_task);
+
+ clear_bit(__ICE_SERVICE_SCHED, pf->state);
+}
+
+/**
* ice_service_timer - timer callback to schedule service task
* @t: pointer to timer_list
*/
@@ -997,6 +1155,114 @@ static void ice_service_timer(struct timer_list *t)
}
/**
+ * ice_handle_mdd_event - handle malicious driver detect event
+ * @pf: pointer to the PF structure
+ *
+ * Called from service task. OICR interrupt handler indicates MDD event
+ */
+static void ice_handle_mdd_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool mdd_detected = false;
+ u32 reg;
+
+ if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ return;
+
+ /* find what triggered the MDD event */
+ reg = rd32(hw, GL_MDET_TX_PQM);
+ if (reg & GL_MDET_TX_PQM_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
+ GL_MDET_TX_PQM_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
+ GL_MDET_TX_PQM_VF_NUM_S;
+ u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
+ GL_MDET_TX_PQM_MAL_TYPE_S;
+ u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
+ GL_MDET_TX_PQM_QNUM_S);
+
+ if (netif_msg_tx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ reg = rd32(hw, GL_MDET_TX_TCLAN);
+ if (reg & GL_MDET_TX_TCLAN_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
+ GL_MDET_TX_TCLAN_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
+ GL_MDET_TX_TCLAN_VF_NUM_S;
+ u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
+ GL_MDET_TX_TCLAN_MAL_TYPE_S;
+ u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
+ GL_MDET_TX_TCLAN_QNUM_S);
+
+		if (netif_msg_tx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ reg = rd32(hw, GL_MDET_RX);
+ if (reg & GL_MDET_RX_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
+ GL_MDET_RX_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
+ GL_MDET_RX_VF_NUM_S;
+ u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
+ GL_MDET_RX_MAL_TYPE_S;
+ u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
+ GL_MDET_RX_QNUM_S);
+
+ if (netif_msg_rx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ if (mdd_detected) {
+ bool pf_mdd_detected = false;
+
+ reg = rd32(hw, PF_MDET_TX_PQM);
+ if (reg & PF_MDET_TX_PQM_VALID_M) {
+ wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
+ dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ pf_mdd_detected = true;
+ }
+
+ reg = rd32(hw, PF_MDET_TX_TCLAN);
+ if (reg & PF_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+ dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ pf_mdd_detected = true;
+ }
+
+ reg = rd32(hw, PF_MDET_RX);
+ if (reg & PF_MDET_RX_VALID_M) {
+ wr32(hw, PF_MDET_RX, 0xFFFF);
+ dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
+ pf_mdd_detected = true;
+ }
+		/* Queue belongs to the PF, initiate a reset */
+ if (pf_mdd_detected) {
+ set_bit(__ICE_NEEDS_RESTART, pf->state);
+ ice_service_task_schedule(pf);
+ }
+ }
+
+ /* re-enable MDD interrupt cause */
+ clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
+ reg = rd32(hw, PFINT_OICR_ENA);
+ reg |= PFINT_OICR_MAL_DETECT_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ ice_flush(hw);
+}
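
This handler is the bottom half of a masked-cause pattern: ice_misc_intr (later in this patch) masks PFINT_OICR_MAL_DETECT_M and sets __ICE_MDD_EVENT_PENDING, and this service-task routine does the slow GL_MDET_* reads before unmasking. A standalone sketch of that hand-off with stand-in state (all names hypothetical):

#include <assert.h>
#include <stdbool.h>

#define CAUSE_M		(1u << 19)	/* stand-in for the MAL_DETECT bit */

static unsigned int oicr_ena = ~0u;	/* stand-in enable register */
static bool event_pending;

static void irq_handler(void)		/* hard-irq side: stay cheap */
{
	oicr_ena &= ~CAUSE_M;		/* mask the cause...          */
	event_pending = true;		/* ...and defer the real work */
}

static void service_task(void)		/* process-context side */
{
	if (!event_pending)
		return;
	/* ... the slow part: read and clear the GL_MDET_* registers ... */
	event_pending = false;
	oicr_ena |= CAUSE_M;		/* re-enable the cause last */
}

int main(void)
{
	irq_handler();
	service_task();
	assert(!event_pending && (oicr_ena & CAUSE_M));
	return 0;
}
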
+
+/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
*/
@@ -1010,14 +1276,17 @@ static void ice_service_task(struct work_struct *work)
/* process reset requests first */
ice_reset_subtask(pf);
- /* bail if a reset/recovery cycle is pending */
+ /* bail if a reset/recovery cycle is pending or rebuild failed */
if (ice_is_reset_recovery_pending(pf->state) ||
- test_bit(__ICE_SUSPENDED, pf->state)) {
+ test_bit(__ICE_SUSPENDED, pf->state) ||
+ test_bit(__ICE_NEEDS_RESTART, pf->state)) {
ice_service_task_complete(pf);
return;
}
+ ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
+ ice_handle_mdd_event(pf);
ice_watchdog_subtask(pf);
ice_clean_adminq_subtask(pf);
@@ -1029,6 +1298,7 @@ static void ice_service_task(struct work_struct *work)
* schedule the service task now.
*/
if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
+ test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
@@ -1157,7 +1427,7 @@ static void ice_vsi_delete(struct ice_vsi *vsi)
memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
- status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
+ status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
if (status)
dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
vsi->vsi_num);
@@ -1420,13 +1690,13 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
}
/**
- * ice_vsi_add - Create a new VSI or fetch preallocated VSI
+ * ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
*
* This initializes a VSI context depending on the VSI type to be added and
* passes it down to the add_vsi aq command to create a new VSI.
*/
-static int ice_vsi_add(struct ice_vsi *vsi)
+static int ice_vsi_init(struct ice_vsi *vsi)
{
struct ice_vsi_ctx ctxt = { 0 };
struct ice_pf *pf = vsi->back;
@@ -1453,13 +1723,17 @@ static int ice_vsi_add(struct ice_vsi *vsi)
ctxt.info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, &ctxt);
- ret = ice_aq_add_vsi(hw, &ctxt, NULL);
+ ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
if (ret) {
- dev_err(&vsi->back->pdev->dev,
- "Add VSI AQ call failed, err %d\n", ret);
+ dev_err(&pf->pdev->dev,
+ "Add VSI failed, err %d\n", ret);
return -EIO;
}
+
+ /* keep context for update VSI operations */
vsi->info = ctxt.info;
+
+ /* record VSI number returned */
vsi->vsi_num = ctxt.vsi_num;
return ret;
@@ -1735,8 +2009,14 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
+ if (oicr & PFINT_OICR_MAL_DETECT_M) {
+ ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
+ set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
+ }
+
if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
+
/* we have a reset warning */
ena_mask &= ~PFINT_OICR_GRST_M;
reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
@@ -1754,7 +2034,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
* We also make note of which reset happened so that peer
* devices/drivers can be informed.
*/
- if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) {
+ if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING,
+ pf->state)) {
if (reset == ICE_RESET_CORER)
set_bit(__ICE_CORER_RECV, pf->state);
else if (reset == ICE_RESET_GLOBR)
@@ -1762,7 +2043,20 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
else
set_bit(__ICE_EMPR_RECV, pf->state);
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+			/* There are a couple of different bits at play here.
+ * hw->reset_ongoing indicates whether the hardware is
+ * in reset. This is set to true when a reset interrupt
+ * is received and set back to false after the driver
+ * has determined that the hardware is out of reset.
+ *
+ * __ICE_RESET_RECOVERY_PENDING in pf->state indicates
+ * that a post reset rebuild is required before the
+ * driver is operational again. This is set above.
+ *
+ * As this is the start of the reset/rebuild cycle, set
+ * both to indicate that.
+ */
+ hw->reset_ongoing = true;
}
}
@@ -2635,14 +2929,12 @@ ice_vsi_cfg_rss_exit:
}
/**
- * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI
- * @vsi: pointer to the ice_vsi
- *
- * This reallocates the VSIs queue resources
+ * ice_vsi_rebuild - Rebuild VSI after reset
+ * @vsi: VSI to be rebuilt
*
* Returns 0 on success and negative value on failure
*/
-static int ice_vsi_reinit_setup(struct ice_vsi *vsi)
+static int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
int ret, i;
@@ -2658,7 +2950,7 @@ static int ice_vsi_reinit_setup(struct ice_vsi *vsi)
ice_vsi_set_num_qs(vsi);
/* Initialize VSI struct elements and create VSI in FW */
- ret = ice_vsi_add(vsi);
+ ret = ice_vsi_init(vsi);
if (ret < 0)
goto err_vsi;
@@ -2668,19 +2960,7 @@ static int ice_vsi_reinit_setup(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
- if (!vsi->netdev) {
- ret = ice_cfg_netdev(vsi);
- if (ret)
- goto err_rings;
-
- ret = register_netdev(vsi->netdev);
- if (ret)
- goto err_rings;
-
- netif_carrier_off(vsi->netdev);
- netif_tx_stop_all_queues(vsi->netdev);
- }
-
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
goto err_rings;
@@ -2732,21 +3012,23 @@ err_vsi:
/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
- * @type: VSI type
* @pi: pointer to the port_info instance
+ * @type: VSI type
+ * @vf_id: ID of the VF this VSI connects to. This field is only meaningful
+ *         for the ICE_VSI_VF VSI type; for all other VSI types, pass
+ *         ICE_INVAL_VFID.
*
* This allocates the sw VSI structure and its queue resources.
*
- * Returns pointer to the successfully allocated and configure VSI sw struct on
- * success, otherwise returns NULL on failure.
+ * Returns pointer to the successfully allocated and configured VSI sw struct on
+ * success, NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type,
- struct ice_port_info *pi)
+ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+ enum ice_vsi_type type, u16 __always_unused vf_id)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = &pf->pdev->dev;
- struct ice_vsi_ctx ctxt = { 0 };
struct ice_vsi *vsi;
int ret, i;
@@ -2769,12 +3051,10 @@ ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type,
ice_vsi_set_rss_params(vsi);
/* create the VSI */
- ret = ice_vsi_add(vsi);
+ ret = ice_vsi_init(vsi);
if (ret)
goto err_vsi;
- ctxt.vsi_num = vsi->vsi_num;
-
switch (vsi->type) {
case ICE_VSI_PF:
ret = ice_cfg_netdev(vsi);
@@ -2843,10 +3123,7 @@ err_register_netdev:
vsi->netdev = NULL;
}
err_cfg_netdev:
- ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
- if (ret)
- dev_err(&vsi->back->pdev->dev,
- "Free VSI AQ call failed, err %d\n", ret);
+ ice_vsi_delete(vsi);
err_vsi:
ice_vsi_put_qs(vsi);
err_get_qs:
@@ -2858,6 +3135,20 @@ err_get_qs:
}
/**
+ * ice_pf_vsi_setup - Set up a PF VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ *
+ * Returns pointer to the successfully allocated VSI sw struct on success,
+ * otherwise returns NULL on failure.
+ */
+static struct ice_vsi *
+ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
+}
+
+/**
* ice_vsi_add_vlan - Add vsi membership for given vlan
* @vsi: the vsi being configured
* @vid: vlan id to be added
@@ -2908,7 +3199,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int ret = 0;
+ int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -2919,6 +3210,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
if (vsi->info.pvid)
return -EINVAL;
+ /* Enable VLAN pruning when VLAN 0 is added */
+ if (unlikely(!vid)) {
+ ret = ice_cfg_vlan_pruning(vsi, true);
+ if (ret)
+ return ret;
+ }
+
/* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
@@ -2935,16 +3233,19 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
* ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
* @vsi: the VSI being configured
* @vid: VLAN id to be removed
+ *
+ * Returns 0 on success and negative on failure
*/
-static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
+static int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
+ int status = 0;
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
if (!list)
- return;
+ return -ENOMEM;
list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
@@ -2956,11 +3257,14 @@ static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
- if (ice_remove_vlan(&pf->hw, &tmp_add_list))
+ if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
vid, vsi->vsi_num);
+ status = -EIO;
+ }
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return status;
}
/**
@@ -2976,19 +3280,25 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ int status;
if (vsi->info.pvid)
return -EINVAL;
- /* return code is ignored as there is nothing a user
- * can do about failure to remove and a log message was
- * already printed from the other function
+ /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
+ * information
*/
- ice_vsi_kill_vlan(vsi, vid);
+ status = ice_vsi_kill_vlan(vsi, vid);
+ if (status)
+ return status;
clear_bit(vid, vsi->active_vlans);
- return 0;
+ /* Disable VLAN pruning when VLAN 0 is removed */
+ if (unlikely(!vid))
+ status = ice_cfg_vlan_pruning(vsi, false);
+
+ return status;
}
/**
@@ -3004,50 +3314,48 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
struct ice_vsi *vsi;
int status = 0;
- if (!ice_is_reset_recovery_pending(pf->state)) {
- vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);
- if (!vsi) {
- status = -ENOMEM;
- goto error_exit;
- }
- } else {
- vsi = pf->vsi[0];
- status = ice_vsi_reinit_setup(vsi);
- if (status < 0)
- return -EIO;
+ if (ice_is_reset_recovery_pending(pf->state))
+ return -EBUSY;
+
+ vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
+ if (!vsi) {
+ status = -ENOMEM;
+ goto unroll_vsi_setup;
}
- /* tmp_add_list contains a list of MAC addresses for which MAC
- * filters need to be programmed. Add the VSI's unicast MAC to
- * this list
+ /* To add a MAC filter, first add the MAC to a list and then
+ * pass the list to ice_add_mac.
*/
+
+ /* Add a unicast MAC filter so the VSI can get its packets */
status = ice_add_mac_to_list(vsi, &tmp_add_list,
vsi->port_info->mac.perm_addr);
if (status)
- goto error_exit;
+ goto unroll_vsi_setup;
/* VSI needs to receive broadcast traffic, so add the broadcast
- * MAC address to the list.
+ * MAC address to the list as well.
*/
eth_broadcast_addr(broadcast);
status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
if (status)
- goto error_exit;
+ goto free_mac_list;
/* program MAC filters for entries in tmp_add_list */
status = ice_add_mac(&pf->hw, &tmp_add_list);
if (status) {
dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
status = -ENOMEM;
- goto error_exit;
+ goto free_mac_list;
}
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return status;
-error_exit:
+free_mac_list:
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+unroll_vsi_setup:
if (vsi) {
ice_vsi_free_q_vectors(vsi);
if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
@@ -3097,10 +3405,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
*/
static void ice_deinit_pf(struct ice_pf *pf)
{
- if (pf->serv_tmr.function)
- del_timer_sync(&pf->serv_tmr);
- if (pf->serv_task.func)
- cancel_work_sync(&pf->serv_task);
+ ice_service_task_stop(pf);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->avail_q_mutex);
}
@@ -3307,6 +3612,8 @@ static int ice_probe(struct pci_dev *pdev,
pf->pdev = pdev;
pci_set_drvdata(pdev, pf);
set_bit(__ICE_DOWN, pf->state);
+ /* Disable service task until DOWN bit is cleared */
+ set_bit(__ICE_SERVICE_DIS, pf->state);
hw = &pf->hw;
hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
@@ -3364,6 +3671,9 @@ static int ice_probe(struct pci_dev *pdev,
goto err_init_interrupt_unroll;
}
+ /* Driver is mostly up */
+ clear_bit(__ICE_DOWN, pf->state);
+
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
* the misc functionality and queue processing is combined in
@@ -3386,7 +3696,11 @@ static int ice_probe(struct pci_dev *pdev,
goto err_msix_misc_unroll;
}
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ if (hw->evb_veb)
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
+
pf->first_sw->pf = pf;
/* record the sw_id available for later use */
@@ -3399,8 +3713,7 @@ static int ice_probe(struct pci_dev *pdev,
goto err_alloc_sw_unroll;
}
- /* Driver is mostly up */
- clear_bit(__ICE_DOWN, pf->state);
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
@@ -3414,6 +3727,7 @@ static int ice_probe(struct pci_dev *pdev,
return 0;
err_alloc_sw_unroll:
+ set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(&pf->pdev->dev, pf->first_sw);
err_msix_misc_unroll:
@@ -3436,24 +3750,14 @@ err_exit_unroll:
static void ice_remove(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- int i = 0;
- int err;
if (!pf)
return;
set_bit(__ICE_DOWN, pf->state);
+ ice_service_task_stop(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (!pf->vsi[i])
- continue;
-
- err = ice_vsi_release(pf->vsi[i]);
- if (err)
- dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n",
- i, err);
- }
-
+ ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
@@ -3500,7 +3804,7 @@ static int __init ice_module_init(void)
pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
pr_info("%s\n", ice_copyright);
- ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
@@ -4185,7 +4489,14 @@ static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
}
status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
NULL);
- if (status) {
+ /* if the disable queue command was exercised during an active reset
+ * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
+ * the reset operation disables queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&pf->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status) {
dev_err(&pf->pdev->dev,
"Failed to disable LAN Tx queues, error: %d\n",
status);
@@ -5080,8 +5391,14 @@ static int ice_vsi_release(struct ice_vsi *vsi)
if (!vsi->back)
return -ENODEV;
pf = vsi->back;
-
- if (vsi->netdev) {
+ /* Do not unregister and free netdevs while the driver is in the
+ * reset recovery pending state. Since reset/rebuild happens through
+ * the PF service task workqueue, it is not a good idea to unregister
+ * a netdev that is associated with the PF running the workqueue
+ * items. This avoids a check_flush_dependency() warning on this wq.
+ */
+ if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) {
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
@@ -5107,12 +5424,40 @@ static int ice_vsi_release(struct ice_vsi *vsi)
pf->q_left_tx += vsi->alloc_txq;
pf->q_left_rx += vsi->alloc_rxq;
- ice_vsi_clear(vsi);
+ /* Retain the SW VSI data structure since it is needed to unregister
+ * and free the VSI netdev when the PF is not in the reset recovery
+ * pending state, for example during rmmod.
+ */
+ if (!ice_is_reset_recovery_pending(pf->state))
+ ice_vsi_clear(vsi);
return 0;
}
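The gating predicate used above is assumed to be a thin test_bit() wrapper over the PF state bits; a minimal sketch:

static bool ice_is_reset_recovery_pending(unsigned long *state)
{
	return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
}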
/**
+ * ice_vsi_release_all - Delete all VSIs
+ * @pf: PF from which all VSIs are being removed
+ */
+static void ice_vsi_release_all(struct ice_pf *pf)
+{
+ int err, i;
+
+ if (!pf->vsi)
+ return;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!pf->vsi[i])
+ continue;
+
+ err = ice_vsi_release(pf->vsi[i]);
+ if (err)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
+ i, err, pf->vsi[i]->vsi_num);
+ }
+}
+
+/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
*/
@@ -5124,27 +5469,31 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
set_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && netif_running(vsi->netdev) &&
- vsi->type == ICE_VSI_PF)
+ vsi->type == ICE_VSI_PF) {
+ rtnl_lock();
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-
- ice_vsi_close(vsi);
+ rtnl_unlock();
+ } else {
+ ice_vsi_close(vsi);
+ }
}
/**
* ice_ena_vsi - resume a VSI
* @vsi: the VSI being resumed
*/
-static void ice_ena_vsi(struct ice_vsi *vsi)
+static int ice_ena_vsi(struct ice_vsi *vsi)
{
- if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
- return;
+ int err = 0;
+
+ if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
+ if (vsi->netdev && netif_running(vsi->netdev)) {
+ rtnl_lock();
+ err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+ rtnl_unlock();
+ }
- if (vsi->netdev && netif_running(vsi->netdev))
- vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
- else if (ice_vsi_open(vsi))
- /* this clears the DOWN bit */
- dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n",
- vsi->vsi_num, vsi->vsw->sw_id);
+ return err;
}
/**
@@ -5164,13 +5513,47 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
*/
-static void ice_pf_ena_all_vsi(struct ice_pf *pf)
+static int ice_pf_ena_all_vsi(struct ice_pf *pf)
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- ice_ena_vsi(pf->vsi[v]);
+ if (ice_ena_vsi(pf->vsi[v]))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_rebuild_all - rebuild all VSIs in pf
+ * @pf: the PF
+ */
+static int ice_vsi_rebuild_all(struct ice_pf *pf)
+{
+ int i;
+
+ /* loop through pf->vsi array and reinit the VSI if found */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ int err;
+
+ if (!pf->vsi[i])
+ continue;
+
+ err = ice_vsi_rebuild(pf->vsi[i]);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "VSI at index %d rebuild failed\n",
+ pf->vsi[i]->idx);
+ return err;
+ }
+
+ dev_info(&pf->pdev->dev,
+ "VSI at index %d rebuilt. vsi_num = 0x%x\n",
+ pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ }
+
+ return 0;
}
/**
@@ -5192,13 +5575,13 @@ static void ice_rebuild(struct ice_pf *pf)
ret = ice_init_all_ctrlq(hw);
if (ret) {
dev_err(dev, "control queues init failed %d\n", ret);
- goto fail_reset;
+ goto err_init_ctrlq;
}
ret = ice_clear_pf_cfg(hw);
if (ret) {
dev_err(dev, "clear PF configuration failed %d\n", ret);
- goto fail_reset;
+ goto err_init_ctrlq;
}
ice_clear_pxe_mode(hw);
@@ -5206,14 +5589,24 @@ static void ice_rebuild(struct ice_pf *pf)
ret = ice_get_caps(hw);
if (ret) {
dev_err(dev, "ice_get_caps failed %d\n", ret);
- goto fail_reset;
+ goto err_init_ctrlq;
}
- /* basic nic switch setup */
- err = ice_setup_pf_sw(pf);
+ err = ice_sched_init_port(hw->port_info);
+ if (err)
+ goto err_sched_init_port;
+
+ err = ice_vsi_rebuild_all(pf);
if (err) {
- dev_err(dev, "ice_setup_pf_sw failed\n");
- goto fail_reset;
+ dev_err(dev, "ice_vsi_rebuild_all failed\n");
+ goto err_vsi_rebuild;
+ }
+
+ ret = ice_replay_all_fltr(&pf->hw);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "error replaying switch filter rules\n");
+ goto err_vsi_rebuild;
}
/* start misc vector */
@@ -5221,20 +5614,35 @@ static void ice_rebuild(struct ice_pf *pf)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "misc vector setup failed: %d\n", err);
- goto fail_reset;
+ goto err_vsi_rebuild;
}
}
/* restart the VSIs that were rebuilt and running before the reset */
- ice_pf_ena_all_vsi(pf);
+ err = ice_pf_ena_all_vsi(pf);
+ if (err) {
+ dev_err(&pf->pdev->dev, "error enabling VSIs\n");
+ /* No need to disable VSIs in the teardown path of ice_rebuild()
+ * since that is already taken care of in ice_vsi_open().
+ */
+ goto err_vsi_rebuild;
+ }
+ /* if we get here, reset flow is successful */
+ clear_bit(__ICE_RESET_FAILED, pf->state);
return;
-fail_reset:
+err_vsi_rebuild:
+ ice_vsi_release_all(pf);
+err_sched_init_port:
+ ice_sched_cleanup_all(hw);
+err_init_ctrlq:
ice_shutdown_all_ctrlq(hw);
set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ /* set this bit in PF state to control service task scheduling */
+ set_bit(__ICE_NEEDS_RESTART, pf->state);
+ dev_err(dev, "Rebuild failed, unload and reload driver\n");
}
/**
@@ -5390,6 +5798,232 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
}
/**
+ * ice_bridge_getlink - Get the hardware bridge mode
+ * @skb: skb buff
+ * @pid: process id
+ * @seq: RTNL message seq
+ * @dev: the netdev being configured
+ * @filter_mask: filter mask passed in
+ * @nlflags: netlink flags passed in
+ *
+ * Return the bridge mode (VEB/VEPA)
+ */
+static int
+ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask, int nlflags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 bmode;
+
+ bmode = pf->first_sw->bridge_mode;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
+ filter_mask, NULL);
+}
+
+/**
+ * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
+ * @vsi: Pointer to VSI structure
+ * @bmode: Hardware bridge mode (VEB/VEPA)
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_aqc_vsi_props *vsi_props;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ vsi_props = &vsi->info;
+ ctxt.info = vsi->info;
+
+ if (bmode == BRIDGE_MODE_VEB)
+ /* change from VEPA to VEB mode */
+ ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+ else
+ /* change from VEB to VEPA mode */
+ ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+ ctxt.vsi_num = vsi->vsi_num;
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+ status = ice_aq_update_vsi(hw, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
+ bmode, status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+ /* Update sw flags for bookkeeping */
+ vsi_props->sw_flags = ctxt.info.sw_flags;
+
+ return 0;
+}
+
+/**
+ * ice_bridge_setlink - Set the hardware bridge mode
+ * @dev: the netdev being configured
+ * @nlh: RTNL message
+ * @flags: bridge setlink flags
+ *
+ * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
+ * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
+ * not already set) for all VSIs connected to this switch, and also updates
+ * the unicast switch filter rules for the corresponding switch of the netdev.
+ */
+static int
+ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 __always_unused flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_pf *pf = np->vsi->back;
+ struct nlattr *attr, *br_spec;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ struct ice_sw *pf_sw;
+ int rem, v, err = 0;
+
+ pf_sw = pf->first_sw;
+ /* find the attribute in the netlink message */
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+ mode = nla_get_u16(attr);
+ if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+ return -EINVAL;
+ /* Continue if bridge mode is not being flipped */
+ if (mode == pf_sw->bridge_mode)
+ continue;
+ /* Iterate through the PF VSI list and update the loopback
+ * mode of each VSI.
+ */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+ err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
+ if (err)
+ return err;
+ }
+
+ hw->evb_veb = (mode == BRIDGE_MODE_VEB);
+ /* Update the unicast switch filter rules for the corresponding
+ * switch of the netdev
+ */
+ status = ice_update_sw_rule_bridge_mode(hw);
+ if (status) {
+ netdev_err(dev, "update SW_RULE for bridge mode failed, mode = %d err %d aq_err %d\n",
+ mode, status, hw->adminq.sq_last_status);
+ /* revert hw->evb_veb */
+ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
+ return -EIO;
+ }
+
+ pf_sw->bridge_mode = mode;
+ }
+
+ return 0;
+}
+
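A hedged sketch of the netlink layout this handler expects; the nesting below is what a userspace tool such as iproute2's "bridge link set dev <if> hwmode {veb|vepa}" produces:

/*
 * RTM_SETLINK
 *   IFLA_AF_SPEC               (nested; located via nlmsg_find_attr())
 *     IFLA_BRIDGE_MODE (u16)   BRIDGE_MODE_VEB or BRIDGE_MODE_VEPA
 */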
+/**
+ * ice_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void ice_tx_timeout(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_ring *tx_ring = NULL;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u32 head, val = 0, i;
+ int hung_queue = -1;
+
+ pf->tx_timeout_count++;
+
+ /* find the stopped queue the same way the stack does */
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
+ q = netdev_get_tx_queue(netdev, i);
+ trans_start = q->trans_start;
+ if (netif_xmit_stopped(q) &&
+ time_after(jiffies,
+ (trans_start + netdev->watchdog_timeo))) {
+ hung_queue = i;
+ break;
+ }
+ }
+
+ if (i == netdev->num_tx_queues) {
+ netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
+ } else {
+ /* now that we have an index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_txq; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (hung_queue ==
+ vsi->tx_rings[i]->q_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
+ }
+ }
+ }
+
+ /* Reset recovery level if enough time has elapsed after last timeout.
+ * Also ensure no new reset action happens before next timeout period.
+ */
+ if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
+ pf->tx_timeout_recovery_level = 1;
+ else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
+ netdev->watchdog_timeo)))
+ return;
+
+ if (tx_ring) {
+ head = tx_ring->next_to_clean;
+ /* Read interrupt register */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ val = rd32(&pf->hw,
+ GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
+ tx_ring->vsi->base_vector - 1));
+
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
+ head, tx_ring->next_to_use,
+ readl(tx_ring->tail), val);
+ }
+
+ pf->tx_timeout_last_recovery = jiffies;
+ netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
+ pf->tx_timeout_recovery_level, hung_queue);
+
+ switch (pf->tx_timeout_recovery_level) {
+ case 1:
+ set_bit(__ICE_PFR_REQ, pf->state);
+ break;
+ case 2:
+ set_bit(__ICE_CORER_REQ, pf->state);
+ break;
+ case 3:
+ set_bit(__ICE_GLOBR_REQ, pf->state);
+ break;
+ default:
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
+ set_bit(__ICE_DOWN, pf->state);
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+ set_bit(__ICE_SERVICE_DIS, pf->state);
+ break;
+ }
+
+ ice_service_task_schedule(pf);
+ pf->tx_timeout_recovery_level++;
+}
+
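The reset-request bits set above are assumed to be consumed by the PF service task roughly as sketched below; ice_reset_subtask() and the ICE_RESET_* names are illustrative of the driver's reset plumbing, which lives outside this hunk:

static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* escalate to the strongest reset that was requested */
	if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;

	if (reset_type != ICE_RESET_INVAL)
		ice_do_reset(pf, reset_type);
}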
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -5407,6 +6041,11 @@ static int ice_open(struct net_device *netdev)
struct ice_vsi *vsi = np->vsi;
int err;
+ if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
+ netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
+ return -EIO;
+ }
+
netif_carrier_off(netdev);
err = ice_vsi_open(vsi);
@@ -5503,6 +6142,9 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
+ .ndo_bridge_getlink = ice_bridge_getlink,
+ .ndo_bridge_setlink = ice_bridge_setlink,
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
+ .ndo_tx_timeout = ice_tx_timeout,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 295a8cd87fc1..3274c543283c 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -137,7 +137,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
if (hw->nvm.blank_nvm_mode)
return 0;
- return ice_acquire_res(hw, ICE_NVM_RES_ID, access);
+ return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index eeae199469b6..9b7b50554952 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -17,7 +17,6 @@ ice_sched_add_root_node(struct ice_port_info *pi,
{
struct ice_sched_node *root;
struct ice_hw *hw;
- u16 max_children;
if (!pi)
return ICE_ERR_PARAM;
@@ -28,8 +27,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
if (!root)
return ICE_ERR_NO_MEMORY;
- max_children = le16_to_cpu(hw->layer_info[0].max_children);
- root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+ /* coverity[suspicious_sizeof] */
+ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
sizeof(*root), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
@@ -100,7 +99,6 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_sched_node *parent;
struct ice_sched_node *node;
struct ice_hw *hw;
- u16 max_children;
if (!pi)
return ICE_ERR_PARAM;
@@ -120,9 +118,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
if (!node)
return ICE_ERR_NO_MEMORY;
- max_children = le16_to_cpu(hw->layer_info[layer].max_children);
- if (max_children) {
- node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+ if (hw->max_children[layer]) {
+ /* coverity[suspicious_sizeof] */
+ node->children = devm_kcalloc(ice_hw_to_dev(hw),
+ hw->max_children[layer],
sizeof(*node), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
@@ -192,14 +191,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
+
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
for (i = 0; i < num_nodes; i++)
buf->teid[i] = cpu_to_le32(node_teids[i]);
+
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
if (status || num_groups_removed != 1)
ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+
devm_kfree(ice_hw_to_dev(hw), buf);
return status;
}
@@ -592,13 +594,16 @@ static void ice_sched_clear_port(struct ice_port_info *pi)
*/
void ice_sched_cleanup_all(struct ice_hw *hw)
{
- if (!hw || !hw->port_info)
+ if (!hw)
return;
- if (hw->layer_info)
+ if (hw->layer_info) {
devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
+ hw->layer_info = NULL;
+ }
- ice_sched_clear_port(hw->port_info);
+ if (hw->port_info)
+ ice_sched_clear_port(hw->port_info);
hw->num_tx_sched_layers = 0;
hw->num_tx_sched_phys_layers = 0;
@@ -671,9 +676,13 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
ICE_AQC_ELEM_VALID_EIR;
buf->generic[i].data.generic = 0;
buf->generic[i].data.cir_bw.bw_profile_idx =
- ICE_SCHED_DFLT_RL_PROF_ID;
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->generic[i].data.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
buf->generic[i].data.eir_bw.bw_profile_idx =
- ICE_SCHED_DFLT_RL_PROF_ID;
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->generic[i].data.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
}
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
@@ -697,7 +706,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
teid = le32_to_cpu(buf->generic[i].node_teid);
new_node = ice_sched_find_node_by_teid(parent, teid);
-
if (!new_node) {
ice_debug(hw, ICE_DBG_SCHED,
"Node is missing for teid =%d\n", teid);
@@ -710,7 +718,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/* add it to previous node sibling pointer */
/* Note: siblings are not linked across branches */
prev = ice_sched_get_first_node(hw, tc_node, layer);
-
if (prev && prev != new_node) {
while (prev->sibling)
prev = prev->sibling;
@@ -760,8 +767,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
return ICE_ERR_PARAM;
/* max children per node per layer */
- max_child_nodes =
- le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
+ max_child_nodes = hw->max_children[parent->tx_sched_layer];
/* current number of children + required nodes exceed max children ? */
if ((parent->num_children + num_nodes) > max_child_nodes) {
@@ -851,78 +857,6 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
}
/**
- * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer
- * @pi: pointer to the port info struct
- * @layer: layer number
- *
- * This function calculates the number of nodes present in the scheduler tree
- * including all the branches for a given layer
- */
-static u16
-ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer)
-{
- struct ice_hw *hw;
- u16 num_nodes = 0;
- u8 i;
-
- if (!pi)
- return num_nodes;
-
- hw = pi->hw;
-
- /* Calculate the number of nodes for all TCs */
- for (i = 0; i < pi->root->num_children; i++) {
- struct ice_sched_node *tc_node, *node;
-
- tc_node = pi->root->children[i];
-
- /* Get the first node */
- node = ice_sched_get_first_node(hw, tc_node, layer);
- if (!node)
- continue;
-
- /* count the siblings */
- while (node) {
- num_nodes++;
- node = node->sibling;
- }
- }
-
- return num_nodes;
-}
-
-/**
- * ice_sched_val_max_nodes - check max number of nodes reached or not
- * @pi: port information structure
- * @new_num_nodes_per_layer: pointer to the new number of nodes array
- *
- * This function checks whether the scheduler tree layers have enough space to
- * add new nodes
- */
-static enum ice_status
-ice_sched_validate_for_max_nodes(struct ice_port_info *pi,
- u16 *new_num_nodes_per_layer)
-{
- struct ice_hw *hw = pi->hw;
- u8 i, qg_layer;
- u16 num_nodes;
-
- qg_layer = ice_sched_get_qgrp_layer(hw);
-
- /* walk through all the layers from SW entry point to qgroup layer */
- for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) {
- num_nodes = ice_sched_get_num_nodes_per_layer(pi, i);
- if (num_nodes + new_num_nodes_per_layer[i] >
- le16_to_cpu(hw->layer_info[i].max_pf_nodes)) {
- ice_debug(hw, ICE_DBG_SCHED,
- "max nodes reached for layer = %d\n", i);
- return ICE_ERR_CFG;
- }
- }
- return 0;
-}
-
-/**
* ice_rm_dflt_leaf_node - remove the default leaf node in the tree
* @pi: port information structure
*
@@ -1003,14 +937,12 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
hw = pi->hw;
/* Query the Default Topology from FW */
- buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES,
- sizeof(*buf), GFP_KERNEL);
+ buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
/* Query default scheduling tree topology */
- status = ice_aq_get_dflt_topo(hw, pi->lport, buf,
- sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES,
+ status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
&num_branches, NULL);
if (status)
goto err_init_port;
@@ -1097,6 +1029,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
struct ice_aqc_query_txsched_res_resp *buf;
enum ice_status status = 0;
+ __le16 max_sibl;
+ u8 i;
if (hw->layer_info)
return status;
@@ -1115,7 +1049,20 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
hw->flattened_layers = buf->sched_props.flattening_bitmap;
hw->max_cgds = buf->sched_props.max_pf_cgds;
- hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
+ /* The max sibling group size of the current layer refers to the max
+ * children of the node in the layer below:
+ * a layer 1 node's max children is layer 2's max sibling group size,
+ * a layer 2 node's max children is layer 3's max sibling group size,
+ * and so on. The array is populated from the root (index 0) down to
+ * the qgroup layer (7). Leaf nodes have no children.
+ */
+ for (i = 0; i < hw->num_tx_sched_layers; i++) {
+ max_sibl = buf->layer_props[i].max_sibl_grp_sz;
+ hw->max_children[i] = le16_to_cpu(max_sibl);
+ }
+
+ hw->layer_info = (struct ice_aqc_layer_props *)
+ devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
(hw->num_tx_sched_layers *
sizeof(*hw->layer_info)),
GFP_KERNEL);
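Once populated, this cache replaces the per-call le16_to_cpu() reads of layer_info seen elsewhere in the patch; typical consumers (taken from the hunks below) index it by the parent node's layer:

	/* max children per node per layer */
	max_child_nodes = hw->max_children[parent->tx_sched_layer];

	/* round up queue fan-out by the per-layer limit */
	num = DIV_ROUND_UP(num, hw->max_children[i]);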
@@ -1202,7 +1149,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
- max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);
+ max_children = pi->hw->max_children[qgrp_layer];
list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
if (!list_elem)
@@ -1278,10 +1225,8 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
/* calculate num nodes from q group to VSI layer */
for (i = qgl; i > vsil; i--) {
- u16 max_children = le16_to_cpu(hw->layer_info[i].max_children);
-
/* round to the next integer if there is a remainder */
- num = DIV_ROUND_UP(num, max_children);
+ num = DIV_ROUND_UP(num, hw->max_children[i]);
/* need at least one node */
num_nodes[i] = num ? num : 1;
@@ -1311,16 +1256,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
u16 num_added = 0;
u8 i, qgl, vsil;
- status = ice_sched_validate_for_max_nodes(pi, num_nodes);
- if (status)
- return status;
-
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
for (i = vsil + 1; i <= qgl; i++) {
if (!parent)
return ICE_ERR_CFG;
+
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
@@ -1398,8 +1340,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *node;
- u16 max_child;
- u8 i, vsil;
+ u8 vsil;
+ int i;
vsil = ice_sched_get_vsi_layer(hw);
for (i = vsil; i >= hw->sw_entry_point_layer; i--)
@@ -1412,12 +1354,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/* If intermediate nodes are reached max children
* then add a new one.
*/
- node = ice_sched_get_first_node(hw, tc_node, i);
- max_child = le16_to_cpu(hw->layer_info[i].max_children);
-
+ node = ice_sched_get_first_node(hw, tc_node, (u8)i);
/* scan all the siblings */
while (node) {
- if (node->num_children < max_child)
+ if (node->num_children < hw->max_children[i])
break;
node = node->sibling;
}
@@ -1451,10 +1391,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
if (!pi)
return ICE_ERR_PARAM;
- status = ice_sched_validate_for_max_nodes(pi, num_nodes);
- if (status)
- return status;
-
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
@@ -1479,6 +1415,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
if (i == vsil)
parent->vsi_id = vsi_id;
}
+
return 0;
}
@@ -1633,9 +1570,11 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
if (status)
return status;
+
vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
if (!vsi_node)
return ICE_ERR_CFG;
+
vsi->vsi_node[tc] = vsi_node;
vsi_node->in_use = true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 9a95c4ffd7d7..d2dae913d81e 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -20,6 +20,7 @@ enum ice_status {
ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15,
ICE_ERR_MAX_LIMIT = -17,
+ ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 6b7ec2ae5ad6..65b4e1cca6be 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -86,6 +86,35 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
}
/**
+ * ice_init_def_sw_recp - initialize the recipe bookkeeping tables
+ * @hw: pointer to the hw struct
+ *
+ * Allocate memory for the entire recipe table and initialize the structures/
+ * entries corresponding to basic recipes.
+ */
+enum ice_status
+ice_init_def_sw_recp(struct ice_hw *hw)
+{
+ struct ice_sw_recipe *recps;
+ u8 i;
+
+ recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
+ sizeof(struct ice_sw_recipe), GFP_KERNEL);
+ if (!recps)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ recps[i].root_rid = i;
+ INIT_LIST_HEAD(&recps[i].filt_rules);
+ mutex_init(&recps[i].filt_rule_lock);
+ }
+
+ hw->switch_info->recp_list = recps;
+
+ return 0;
+}
+
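Downstream of this table, every per-recipe consumer in the patch follows the same lock-then-walk pattern over the filter rule lists initialized here; a minimal sketch (the MAC recipe index is illustrative):

	struct ice_sw_recipe *recp = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
	struct ice_fltr_mgmt_list_entry *fm_entry;

	mutex_lock(&recp->filt_rule_lock);
	list_for_each_entry(fm_entry, &recp->filt_rules, list_entry) {
		/* inspect or update fm_entry->fltr_info */
	}
	mutex_unlock(&recp->filt_rule_lock);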
+/**
* ice_aq_get_sw_cfg - get switch configuration
* @hw: pointer to the hardware structure
* @buf: pointer to the result buffer
@@ -140,17 +169,17 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
*
* Add a VSI context to the hardware (0x0210)
*/
-enum ice_status
+static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
+ enum ice_status status;
cmd = &desc.params.vsi_cmd;
- res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
+ res = &desc.params.add_update_free_vsi_res;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
@@ -175,6 +204,42 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
}
/**
+ * ice_aq_free_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
+ * @cd: pointer to command details structure or NULL
+ *
+ * Free VSI context info from hardware (0x0213)
+ */
+static enum ice_status
+ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_update_free_vsi_resp *resp;
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.vsi_cmd;
+ resp = &desc.params.add_update_free_vsi_res;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
+
+ cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
+ if (keep_vsi_alloc)
+ cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status) {
+ vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+ }
+
+ return status;
+}
+
+/**
* ice_aq_update_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a VSI context struct
@@ -192,7 +257,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
enum ice_status status;
cmd = &desc.params.vsi_cmd;
- resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
+ resp = &desc.params.add_update_free_vsi_res;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
@@ -212,38 +277,202 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
}
/**
- * ice_aq_free_vsi
+ * ice_update_fltr_vsi_map - update given filter VSI map
+ * @list_head: list of filters that need to be updated
+ * @list_lock: lock that protects the filter list
+ * @old_vsi_num: old VSI HW id
+ * @new_vsi_num: new VSI HW id
+ *
+ * update the VSI map for a given filter list
+ */
+static void
+ice_update_fltr_vsi_map(struct list_head *list_head,
+ struct mutex *list_lock, u16 old_vsi_num,
+ u16 new_vsi_num)
+{
+ struct ice_fltr_mgmt_list_entry *itr;
+
+ mutex_lock(list_lock);
+ if (list_empty(list_head))
+ goto exit_update_map;
+
+ list_for_each_entry(itr, list_head, list_entry) {
+ if (itr->vsi_list_info &&
+ test_bit(old_vsi_num, itr->vsi_list_info->vsi_map)) {
+ clear_bit(old_vsi_num, itr->vsi_list_info->vsi_map);
+ set_bit(new_vsi_num, itr->vsi_list_info->vsi_map);
+ } else if (itr->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ itr->fltr_info.fwd_id.vsi_id == old_vsi_num) {
+ itr->fltr_info.fwd_id.vsi_id = new_vsi_num;
+ itr->fltr_info.src = new_vsi_num;
+ }
+ }
+exit_update_map:
+ mutex_unlock(list_lock);
+}
+
+/**
+ * ice_update_all_fltr_vsi_map - update all filters VSI map
+ * @hw: pointer to the hardware structure
+ * @old_vsi_num: old VSI HW id
+ * @new_vsi_num: new VSI HW id
+ *
+ * update all filters VSI map
+ */
+static void
+ice_update_all_fltr_vsi_map(struct ice_hw *hw, u16 old_vsi_num, u16 new_vsi_num)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ struct list_head *head = &sw->recp_list[i].filt_rules;
+ struct mutex *lock; /* Lock to protect filter rule list */
+
+ lock = &sw->recp_list[i].filt_rule_lock;
+ ice_update_fltr_vsi_map(head, lock, old_vsi_num,
+ new_vsi_num);
+ }
+}
+
+/**
+ * ice_is_vsi_valid - check whether the VSI is valid or not
* @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * check whether the VSI is valid or not
+ */
+static bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
+{
+ return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
+}
+
+/**
+ * ice_get_hw_vsi_num - return the hw VSI number
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * return the hw VSI number
+ * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
+ */
+static u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
+{
+ return hw->vsi_ctx[vsi_handle]->vsi_num;
+}
+
+/**
+ * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * return the VSI context entry for a given VSI handle
+ */
+static struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
+}
+
+/**
+ * ice_save_vsi_ctx - save the VSI context for a given VSI handle
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ * @vsi: VSI context pointer
+ *
+ * save the VSI context entry for a given VSI handle
+ */
+static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_vsi_ctx *vsi)
+{
+ hw->vsi_ctx[vsi_handle] = vsi;
+}
+
+/**
+ * ice_clear_vsi_ctx - clear the VSI context entry
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * clear the VSI context entry
+ */
+static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_vsi_ctx *vsi;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (vsi) {
+ devm_kfree(ice_hw_to_dev(hw), vsi);
+ hw->vsi_ctx[vsi_handle] = NULL;
+ }
+}
+
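Taken together, these helpers give callers a stable handle that survives resets while the hardware VSI number may change underneath; the expected calling pattern, mirrored by ice_free_vsi() below, is roughly:

	/* sketch: callers hold a vsi_handle, never a raw vsi_num */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	/* ... issue AQ commands using vsi_ctx->vsi_num ... */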
+/**
+ * ice_add_vsi - add VSI context to the hardware and VSI handle list
+ * @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle provided by drivers
* @vsi_ctx: pointer to a VSI context struct
- * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
* @cd: pointer to command details structure or NULL
*
- * Get VSI context info from hardware (0x0213)
+ * Add a VSI context to the hardware and also add it to the VSI handle list.
+ * If this function is called after a reset for an existing VSI, update the
+ * corresponding VSI handle list entry with the new HW VSI number.
*/
enum ice_status
-ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- bool keep_vsi_alloc, struct ice_sq_cd *cd)
+ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
{
- struct ice_aqc_add_update_free_vsi_resp *resp;
- struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct ice_vsi_ctx *tmp_vsi_ctx;
enum ice_status status;
- cmd = &desc.params.vsi_cmd;
- resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
+ if (vsi_handle >= ICE_MAX_VSI)
+ return ICE_ERR_PARAM;
+ status = ice_aq_add_vsi(hw, vsi_ctx, cd);
+ if (status)
+ return status;
+ tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!tmp_vsi_ctx) {
+ /* Create a new vsi context */
+ tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*tmp_vsi_ctx), GFP_KERNEL);
+ if (!tmp_vsi_ctx) {
+ ice_aq_free_vsi(hw, vsi_ctx, false, cd);
+ return ICE_ERR_NO_MEMORY;
+ }
+ *tmp_vsi_ctx = *vsi_ctx;
+ ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
+ } else {
+ /* update with new HW VSI num */
+ if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num) {
+ /* update all filter lists with new HW VSI num */
+ ice_update_all_fltr_vsi_map(hw, tmp_vsi_ctx->vsi_num,
+ vsi_ctx->vsi_num);
+ tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
+ }
+ }
- cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
- if (keep_vsi_alloc)
- cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
+ return status;
+}
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
- if (!status) {
- vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
- vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
- }
+/**
+ * ice_free_vsi - free VSI context from hardware and VSI handle list
+ * @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle
+ * @vsi_ctx: pointer to a VSI context struct
+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
+ * @cd: pointer to command details structure or NULL
+ *
+ * Free VSI context info from hardware as well as from VSI handle list
+ */
+enum ice_status
+ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd)
+{
+ enum ice_status status;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+ status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
+ if (!status)
+ ice_clear_vsi_ctx(hw, vsi_handle);
return status;
}
@@ -464,8 +693,9 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
- u8 eth_hdr[DUMMY_ETH_HDR_LEN];
void *daddr = NULL;
+ u16 eth_hdr_sz;
+ u8 *eth_hdr;
u32 act = 0;
__be16 *off;
@@ -477,8 +707,11 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
return;
}
+ eth_hdr_sz = sizeof(dummy_eth_header);
+ eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
+
/* initialize the ether header with a dummy header */
- memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header));
+ memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
ice_fill_sw_info(hw, f_info);
switch (f_info->fltr_act) {
@@ -536,7 +769,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
daddr = f_info->l_data.ethertype_mac.mac_addr;
/* fall-through */
case ICE_SW_LKUP_ETHERTYPE:
- off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET];
+ off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
break;
case ICE_SW_LKUP_MAC_VLAN:
@@ -563,18 +796,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
if (daddr)
- ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr);
+ ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
- off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET];
+ off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = cpu_to_be16(vlan_id);
}
/* Create the switch rule with the final dummy Ethernet header */
if (opc != ice_aqc_opc_update_sw_rules)
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr));
-
- memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr));
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
/**
@@ -816,10 +1047,10 @@ static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_fltr_list_entry *f_entry)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_sw_lkup_type l_type;
+ struct ice_sw_recipe *recp;
enum ice_status status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
@@ -860,31 +1091,9 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
* calls remove filter AQ command
*/
l_type = fm_entry->fltr_info.lkup_type;
- if (l_type == ICE_SW_LKUP_MAC) {
- mutex_lock(&sw->mac_list_lock);
- list_add(&fm_entry->list_entry, &sw->mac_list_head);
- mutex_unlock(&sw->mac_list_lock);
- } else if (l_type == ICE_SW_LKUP_VLAN) {
- mutex_lock(&sw->vlan_list_lock);
- list_add(&fm_entry->list_entry, &sw->vlan_list_head);
- mutex_unlock(&sw->vlan_list_lock);
- } else if (l_type == ICE_SW_LKUP_ETHERTYPE ||
- l_type == ICE_SW_LKUP_ETHERTYPE_MAC) {
- mutex_lock(&sw->eth_m_list_lock);
- list_add(&fm_entry->list_entry, &sw->eth_m_list_head);
- mutex_unlock(&sw->eth_m_list_lock);
- } else if (l_type == ICE_SW_LKUP_PROMISC ||
- l_type == ICE_SW_LKUP_PROMISC_VLAN) {
- mutex_lock(&sw->promisc_list_lock);
- list_add(&fm_entry->list_entry, &sw->promisc_list_head);
- mutex_unlock(&sw->promisc_list_lock);
- } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) {
- mutex_lock(&sw->mac_vlan_list_lock);
- list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head);
- mutex_unlock(&sw->mac_vlan_list_lock);
- } else {
- status = ICE_ERR_NOT_IMPL;
- }
+ recp = &hw->switch_info->recp_list[l_type];
+ list_add(&fm_entry->list_entry, &recp->filt_rules);
+
ice_create_pkt_fwd_rule_exit:
devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
@@ -893,19 +1102,15 @@ ice_create_pkt_fwd_rule_exit:
/**
* ice_update_pkt_fwd_rule
* @hw: pointer to the hardware structure
- * @rule_id: rule of previously created switch rule to update
- * @vsi_list_id: VSI list id to be updated with
- * @f_info: ice_fltr_info to pull other information for switch rule
+ * @f_info: filter information for switch rule
*
* Call AQ command to update a previously created switch rule with a
* VSI list id
*/
static enum ice_status
-ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
- struct ice_fltr_info f_info)
+ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_aqc_sw_rules_elem *s_rule;
- struct ice_fltr_info tmp_fltr;
enum ice_status status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
@@ -913,14 +1118,9 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
if (!s_rule)
return ICE_ERR_NO_MEMORY;
- tmp_fltr = f_info;
- tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
- tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
- ice_fill_sw_rule(hw, &tmp_fltr, s_rule,
- ice_aqc_opc_update_sw_rules);
-
- s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
+ s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
/* Update switch rule with new rule set to forward VSI list */
status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
@@ -931,7 +1131,48 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
}
/**
- * ice_handle_vsi_list_mgmt
+ * ice_update_sw_rule_bridge_mode
+ * @hw: pointer to the hw struct
+ *
+ * Updates unicast switch filter rules based on VEB/VEPA mode
+ */
+enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ enum ice_status status = 0;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(fm_entry, rule_head, list_entry) {
+ struct ice_fltr_info *fi = &fm_entry->fltr_info;
+ u8 *addr = fi->l_data.mac.mac_addr;
+
+ /* Update unicast Tx rules to reflect the selected
+ * VEB/VEPA mode
+ */
+ if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
+ (fi->fltr_act == ICE_FWD_TO_VSI ||
+ fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+ fi->fltr_act == ICE_FWD_TO_Q ||
+ fi->fltr_act == ICE_FWD_TO_QGRP)) {
+ status = ice_update_pkt_fwd_rule(hw, fi);
+ if (status)
+ break;
+ }
+ }
+
+ mutex_unlock(rule_lock);
+
+ return status;
+}
+
+/**
+ * ice_add_update_vsi_list
* @hw: pointer to the hardware structure
* @m_entry: pointer to current filter management list entry
* @cur_fltr: filter information from the book keeping entry
@@ -952,10 +1193,10 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
* using the update switch rule command
*/
static enum ice_status
-ice_handle_vsi_list_mgmt(struct ice_hw *hw,
- struct ice_fltr_mgmt_list_entry *m_entry,
- struct ice_fltr_info *cur_fltr,
- struct ice_fltr_info *new_fltr)
+ice_add_update_vsi_list(struct ice_hw *hw,
+ struct ice_fltr_mgmt_list_entry *m_entry,
+ struct ice_fltr_info *cur_fltr,
+ struct ice_fltr_info *new_fltr)
{
enum ice_status status = 0;
u16 vsi_list_id = 0;
@@ -975,8 +1216,8 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
* a part of a VSI list. So, create a VSI list with the old and
* new VSIs.
*/
+ struct ice_fltr_info tmp_fltr;
u16 vsi_id_arr[2];
- u16 fltr_rule;
/* A rule already exists with the new VSI being added */
if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
@@ -990,12 +1231,14 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
if (status)
return status;
- fltr_rule = cur_fltr->fltr_rule_id;
+ tmp_fltr = *new_fltr;
+ tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
/* Update the previous switch rule of "MAC forward to VSI" to
* "MAC fwd to VSI list"
*/
- status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id,
- *new_fltr);
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
if (status)
return status;
@@ -1040,54 +1283,245 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
}
/**
- * ice_find_mac_entry
+ * ice_find_rule_entry - Search a rule entry
* @hw: pointer to the hardware structure
- * @mac_addr: MAC address to search for
+ * @recp_id: lookup type for which the specified rule needs to be searched
+ * @f_info: rule information
*
+ * Helper function to search for a given rule entry.
+ * Returns a pointer to the entry storing the rule if found. The caller is
+ * expected to hold the recipe's filt_rule_lock.
+ * Helper function to search for a given rule entry
+ * Returns pointer to entry storing the rule if found
*/
static struct ice_fltr_mgmt_list_entry *
-ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr)
+ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
- struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL;
+ struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
struct ice_switch_info *sw = hw->switch_info;
-
- mutex_lock(&sw->mac_list_lock);
- list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) {
- u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
-
- if (ether_addr_equal(buf, mac_addr)) {
- mac_ret = m_list_itr;
+ struct list_head *list_head;
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
+ sizeof(f_info->l_data)) &&
+ f_info->flag == list_itr->fltr_info.flag) {
+ ret = list_itr;
break;
}
}
- mutex_unlock(&sw->mac_list_lock);
- return mac_ret;
+ return ret;
}
/**
- * ice_add_shared_mac - Add one MAC shared filter rule
+ * ice_add_rule_internal - add rule for a given lookup type
* @hw: pointer to the hardware structure
+ * @recp_id: lookup type (recipe id) for which rule has to be added
* @f_entry: structure containing MAC forwarding information
*
- * Adds or updates the book keeping list for the MAC addresses
+ * Adds or updates the rule lists for a given recipe
*/
static enum ice_status
-ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
+ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
+ struct ice_fltr_list_entry *f_entry)
{
+ struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *m_entry;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
- new_fltr = &f_entry->fltr_info;
+ rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
- m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]);
- if (!m_entry)
+ mutex_lock(rule_lock);
+ new_fltr = &f_entry->fltr_info;
+ if (new_fltr->flag & ICE_FLTR_RX)
+ new_fltr->src = hw->port_info->lport;
+ else if (new_fltr->flag & ICE_FLTR_TX)
+ new_fltr->src = f_entry->fltr_info.fwd_id.vsi_id;
+
+ m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
+ if (!m_entry) {
+ mutex_unlock(rule_lock);
return ice_create_pkt_fwd_rule(hw, f_entry);
+ }
cur_fltr = &m_entry->fltr_info;
+ status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
+ mutex_unlock(rule_lock);
- return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr);
+ return status;
+}
+
+/**
+ * ice_remove_vsi_list_rule
+ * @hw: pointer to the hardware structure
+ * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @lkup_type: switch rule filter lookup type
+ *
+ * The VSI list should be emptied before this function is called to remove the
+ * VSI list.
+ */
+static enum ice_status
+ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
+ enum ice_sw_lkup_type lkup_type)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status status;
+ u16 s_rule_size;
+
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
+ s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+
+ /* Free the vsi_list resource that we allocated. It is assumed that the
+ * list is empty at this point.
+ */
+ status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
+ ice_aqc_opc_free_res);
+
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
+
+/**
+ * ice_rem_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @vsi_id: ID of the VSI to remove
+ * @fm_list: filter management entry for which the VSI list management needs to
+ * be done
+ */
+static enum ice_status
+ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_id,
+ struct ice_fltr_mgmt_list_entry *fm_list)
+{
+ enum ice_sw_lkup_type lkup_type;
+ enum ice_status status = 0;
+ u16 vsi_list_id;
+
+ if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
+ fm_list->vsi_count == 0)
+ return ICE_ERR_PARAM;
+
+ /* A rule with the VSI being removed does not exist */
+ if (!test_bit(vsi_id, fm_list->vsi_list_info->vsi_map))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ lkup_type = fm_list->fltr_info.lkup_type;
+ vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
+
+ status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ fm_list->vsi_count--;
+ clear_bit(vsi_id, fm_list->vsi_list_info->vsi_map);
+
+ if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
+ (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list->vsi_list_info;
+ u16 rem_vsi_id;
+
+ rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (rem_vsi_id == ICE_MAX_VSI)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ status = ice_update_vsi_list_rule(hw, &rem_vsi_id, 1,
+ vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ /* Remove the VSI list since it is no longer used */
+ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
+ if (status)
+ return status;
+
+ /* Change the list entry action from VSI_LIST to VSI */
+ fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ fm_list->fltr_info.fwd_id.vsi_id = rem_vsi_id;
+
+ list_del(&vsi_list_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
+ fm_list->vsi_list_info = NULL;
+ }
+
+ return status;
+}
+
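A worked example of the collapse path above, with assumed VSI numbers:

/*
 * Suppose a MAC rule forwards to VSI list L containing VSIs {3, 7}.
 * Removing VSI 3 drops vsi_count to 1, so the remaining VSI (7) is
 * pulled out of the list and the rule is rewritten from
 *
 *   fltr_act = ICE_FWD_TO_VSI_LIST, fwd_id.vsi_list_id = L
 * to
 *   fltr_act = ICE_FWD_TO_VSI,      fwd_id.vsi_id      = 7
 *
 * and list L itself is freed via ice_remove_vsi_list_rule(). Per the
 * lkup_type check above, VLAN rules instead keep their list until it
 * is completely empty.
 */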
+/**
+ * ice_remove_rule_internal - Remove a filter rule of a given type
+ * @hw: pointer to the hardware structure
+ * @recp_id: recipe id for which the rule needs to be removed
+ * @f_entry: rule entry containing filter information
+ */
+static enum ice_status
+ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
+ struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *list_elem;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
+ bool remove_rule = false;
+ u16 vsi_id;
+
+ rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
+ if (!list_elem) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ }
+
+ if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
+ remove_rule = true;
+ } else {
+ vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
+ status = ice_rem_update_vsi_list(hw, vsi_id, list_elem);
+ if (status)
+ goto exit;
+ /* if the VSI count goes to zero after updating the VSI list */
+ if (list_elem->vsi_count == 0)
+ remove_rule = true;
+ }
+
+ if (remove_rule) {
+ /* Remove the lookup rule */
+ struct ice_aqc_sw_rules_elem *s_rule;
+
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
+ GFP_KERNEL);
+ if (!s_rule) {
+ status = ICE_ERR_NO_MEMORY;
+ goto exit;
+ }
+
+ ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
+ ice_aqc_opc_remove_sw_rules);
+
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+ if (status)
+ goto exit;
+
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+
+ /* Remove the bookkeeping entry from the list */
+ list_del(&list_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_elem);
+ }
+exit:
+ mutex_unlock(rule_lock);
+ return status;
}
/**
@@ -1106,7 +1540,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
+ struct list_head *rule_head;
u16 elem_sent, total_elem_left;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
u16 num_unicast = 0;
u16 s_rule_size;
@@ -1114,48 +1551,62 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
if (!m_list || !hw)
return ICE_ERR_PARAM;
+ s_rule = NULL;
+ sw = hw->switch_info;
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry(m_list_itr, m_list, list_entry) {
u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
- if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
- return ICE_ERR_PARAM;
- if (is_zero_ether_addr(add))
+ m_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
+ is_zero_ether_addr(add))
return ICE_ERR_PARAM;
if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
/* Don't overwrite the unicast address */
- if (ice_find_mac_entry(hw, add))
+ mutex_lock(rule_lock);
+ if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
+ &m_list_itr->fltr_info)) {
+ mutex_unlock(rule_lock);
return ICE_ERR_ALREADY_EXISTS;
+ }
+ mutex_unlock(rule_lock);
num_unicast++;
} else if (is_multicast_ether_addr(add) ||
(is_unicast_ether_addr(add) && hw->ucast_shared)) {
- status = ice_add_shared_mac(hw, m_list_itr);
- if (status) {
- m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
- return status;
- }
- m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ m_list_itr->status =
+ ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
+ m_list_itr);
+ if (m_list_itr->status)
+ return m_list_itr->status;
}
}
+ mutex_lock(rule_lock);
/* Exit if no suitable entries were found for adding bulk switch rule */
- if (!num_unicast)
- return 0;
+ if (!num_unicast) {
+ status = 0;
+ goto ice_add_mac_exit;
+ }
+
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
/* Allocate switch rule buffer for the bulk update for unicast */
s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ if (!s_rule) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_add_mac_exit;
+ }
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *addr = &f_info->l_data.mac.mac_addr[0];
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
- if (is_unicast_ether_addr(addr)) {
- ice_fill_sw_rule(hw, &m_list_itr->fltr_info,
- r_iter, ice_aqc_opc_add_sw_rules);
+ if (is_unicast_ether_addr(mac_addr)) {
+ ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
+ ice_aqc_opc_add_sw_rules);
r_iter = (struct ice_aqc_sw_rules_elem *)
((u8 *)r_iter + s_rule_size);
}
@@ -1183,11 +1634,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *addr = &f_info->l_data.mac.mac_addr[0];
- struct ice_switch_info *sw = hw->switch_info;
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
struct ice_fltr_mgmt_list_entry *fm_entry;
- if (is_unicast_ether_addr(addr)) {
+ if (is_unicast_ether_addr(mac_addr)) {
f_info->fltr_rule_id =
le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
f_info->fltr_act = ICE_FWD_TO_VSI;
@@ -1203,46 +1653,21 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
/* The book keeping entries will get removed when
* base driver calls remove filter AQ command
*/
- mutex_lock(&sw->mac_list_lock);
- list_add(&fm_entry->list_entry, &sw->mac_list_head);
- mutex_unlock(&sw->mac_list_lock);
+ list_add(&fm_entry->list_entry, rule_head);
r_iter = (struct ice_aqc_sw_rules_elem *)
((u8 *)r_iter + s_rule_size);
}
}
ice_add_mac_exit:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
+ mutex_unlock(rule_lock);
+ if (s_rule)
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
/**
- * ice_find_vlan_entry
- * @hw: pointer to the hardware structure
- * @vlan_id: VLAN id to search for
- *
- * Helper function to search for a VLAN entry using a given VLAN id
- * Returns pointer to the entry if found.
- */
-static struct ice_fltr_mgmt_list_entry *
-ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
-{
- struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL;
- struct ice_switch_info *sw = hw->switch_info;
-
- mutex_lock(&sw->vlan_list_lock);
- list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry)
- if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) {
- vlan_ret = vlan_list_itr;
- break;
- }
-
- mutex_unlock(&sw->vlan_list_lock);
- return vlan_ret;
-}
-
-/**
* ice_add_vlan_internal - Add one VLAN based filter rule
* @hw: pointer to the hardware structure
* @f_entry: filter entry containing one VLAN information
@@ -1250,20 +1675,22 @@ ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
+ struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *v_list_itr;
- u16 vlan_id;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
new_fltr = &f_entry->fltr_info;
/* VLAN id should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM;
- vlan_id = new_fltr->l_data.vlan.vlan_id;
- v_list_itr = ice_find_vlan_entry(hw, vlan_id);
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ mutex_lock(rule_lock);
+ v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
if (!v_list_itr) {
u16 vsi_id = ICE_VSI_INVAL_ID;
- enum ice_status status;
u16 vsi_list_id = 0;
if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
@@ -1277,26 +1704,33 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
&vsi_list_id,
lkup_type);
if (status)
- return status;
+ goto exit;
new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
new_fltr->fwd_id.vsi_list_id = vsi_list_id;
}
status = ice_create_pkt_fwd_rule(hw, f_entry);
if (!status && vsi_id != ICE_VSI_INVAL_ID) {
- v_list_itr = ice_find_vlan_entry(hw, vlan_id);
- if (!v_list_itr)
- return ICE_ERR_DOES_NOT_EXIST;
+ v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
+ new_fltr);
+ if (!v_list_itr) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ }
v_list_itr->vsi_list_info =
ice_create_vsi_list_map(hw, &vsi_id, 1,
vsi_list_id);
}
- return status;
+ goto exit;
}
cur_fltr = &v_list_itr->fltr_info;
- return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr);
+ status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, new_fltr);
+
+exit:
+ mutex_unlock(rule_lock);
+ return status;
}
/**
@@ -1313,326 +1747,44 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
return ICE_ERR_PARAM;
list_for_each_entry(v_list_itr, v_list, list_entry) {
- enum ice_status status;
-
if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
return ICE_ERR_PARAM;
-
- status = ice_add_vlan_internal(hw, v_list_itr);
- if (status) {
- v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
- return status;
- }
- v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ v_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
}
return 0;
}
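A matching sketch for the VLAN path; note that ice_add_vlan() above forces fltr_info.flag to ICE_FLTR_TX, so the caller only supplies the lookup type, action, VSI, and VLAN ID. The helper name is illustrative:

/* Hypothetical helper: add one VLAN filter for a VSI via ice_add_vlan(),
 * which dispatches to ice_add_vlan_internal() per entry.
 */
static enum ice_status
ice_add_one_vlan(struct ice_hw *hw, u16 vsi_id, u16 vlan_id)
{
	struct ice_fltr_list_entry entry = {};
	LIST_HEAD(v_list);

	entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
	entry.fltr_info.fwd_id.vsi_id = vsi_id;
	entry.fltr_info.l_data.vlan.vlan_id = vlan_id;

	list_add(&entry.list_entry, &v_list);
	return ice_add_vlan(hw, &v_list);
}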
/**
- * ice_remove_vsi_list_rule
+ * ice_rem_sw_rule_info
* @hw: pointer to the hardware structure
- * @vsi_list_id: VSI list id generated as part of allocate resource
- * @lkup_type: switch rule filter lookup type
+ * @rule_head: pointer to the switch rule list whose entries are to be freed
*/
-static enum ice_status
-ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
- enum ice_sw_lkup_type lkup_type)
-{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
- u16 s_rule_size;
-
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
- s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
- s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
- /* FW expects number of VSIs in vsi_list resource to be 0 for clear
- * command. Since memory is zero'ed out during initialization, it's not
- * necessary to explicitly initialize the variable to 0.
- */
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1,
- ice_aqc_opc_remove_sw_rules, NULL);
- if (!status)
- /* Free the vsi_list resource that we allocated */
- status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
- ice_aqc_opc_free_res);
-
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- return status;
-}
-
-/**
- * ice_handle_rem_vsi_list_mgmt
- * @hw: pointer to the hardware structure
- * @vsi_id: ID of the VSI to remove
- * @fm_list_itr: filter management entry for which the VSI list management
- * needs to be done
- */
-static enum ice_status
-ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id,
- struct ice_fltr_mgmt_list_entry *fm_list_itr)
+static void
+ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
- struct ice_switch_info *sw = hw->switch_info;
- enum ice_status status = 0;
- enum ice_sw_lkup_type lkup_type;
- bool is_last_elem = true;
- bool conv_list = false;
- bool del_list = false;
- u16 vsi_list_id;
+ if (!list_empty(rule_head)) {
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct ice_fltr_mgmt_list_entry *tmp;
- lkup_type = fm_list_itr->fltr_info.lkup_type;
- vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id;
-
- if (fm_list_itr->vsi_count > 1) {
- status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
- true,
- ice_aqc_opc_update_sw_rules,
- lkup_type);
- if (status)
- return status;
- fm_list_itr->vsi_count--;
- is_last_elem = false;
- clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map);
- }
-
- /* For non-VLAN rules that forward packets to a VSI list, convert them
- * to forwarding packets to a VSI if there is only one VSI left in the
- * list. Unused lists are then removed.
- * VLAN rules need to use VSI lists even with only one VSI.
- */
- if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) {
- if (lkup_type == ICE_SW_LKUP_VLAN) {
- del_list = is_last_elem;
- } else if (fm_list_itr->vsi_count == 1) {
- conv_list = true;
- del_list = true;
+ list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
+ list_del(&entry->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), entry);
}
}
-
- if (del_list) {
- /* Remove the VSI list since it is no longer used */
- struct ice_vsi_list_map_info *vsi_list_info =
- fm_list_itr->vsi_list_info;
-
- status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
- if (status)
- return status;
-
- if (conv_list) {
- u16 rem_vsi_id;
-
- rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
- ICE_MAX_VSI);
-
- /* Error out when the expected last element is not in
- * the VSI list map
- */
- if (rem_vsi_id == ICE_MAX_VSI)
- return ICE_ERR_OUT_OF_RANGE;
-
- /* Change the list entry action from VSI_LIST to VSI */
- fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id;
- }
-
- list_del(&vsi_list_info->list_entry);
- devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
- fm_list_itr->vsi_list_info = NULL;
- }
-
- if (conv_list) {
- /* Convert the rule's forward action to forwarding packets to
- * a VSI
- */
- struct ice_aqc_sw_rules_elem *s_rule;
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE,
- GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
- ice_aqc_opc_update_sw_rules);
-
- s_rule->pdata.lkup_tx_rx.index =
- cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id);
-
- status = ice_aq_sw_rules(hw, s_rule,
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
- ice_aqc_opc_update_sw_rules, NULL);
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- if (status)
- return status;
- }
-
- if (is_last_elem) {
- /* Remove the lookup rule */
- struct ice_aqc_sw_rules_elem *s_rule;
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
- GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
- ice_aqc_opc_remove_sw_rules);
-
- status = ice_aq_sw_rules(hw, s_rule,
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
- ice_aqc_opc_remove_sw_rules, NULL);
- if (status)
- return status;
-
- /* Remove a book keeping entry from the MAC address list */
- mutex_lock(&sw->mac_list_lock);
- list_del(&fm_list_itr->list_entry);
- mutex_unlock(&sw->mac_list_lock);
- devm_kfree(ice_hw_to_dev(hw), fm_list_itr);
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- }
- return status;
-}
-
-/**
- * ice_remove_mac_entry
- * @hw: pointer to the hardware structure
- * @f_entry: structure containing MAC forwarding information
- */
-static enum ice_status
-ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
-{
- struct ice_fltr_mgmt_list_entry *m_entry;
- u16 vsi_id;
- u8 *add;
-
- add = &f_entry->fltr_info.l_data.mac.mac_addr[0];
-
- m_entry = ice_find_mac_entry(hw, add);
- if (!m_entry)
- return ICE_ERR_PARAM;
-
- vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
- return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry);
}
/**
- * ice_remove_mac - remove a MAC address based filter rule
- * @hw: pointer to the hardware structure
- * @m_list: list of MAC addresses and forwarding information
- *
- * This function removes either a MAC filter rule or a specific VSI from a
- * VSI list for a multicast MAC address.
- *
- * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
- * ice_add_mac. Caller should be aware that this call will only work if all
- * the entries passed into m_list were added previously. It will not attempt to
- * do a partial remove of entries that were found.
- */
-enum ice_status
-ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
-{
- struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
- u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
- struct ice_switch_info *sw = hw->switch_info;
- struct ice_fltr_mgmt_list_entry *m_entry;
- struct ice_fltr_list_entry *m_list_itr;
- u16 elem_sent, total_elem_left;
- enum ice_status status = 0;
- u16 num_unicast = 0;
-
- if (!m_list)
- return ICE_ERR_PARAM;
-
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
-
- if (is_unicast_ether_addr(addr) && !hw->ucast_shared)
- num_unicast++;
- else if (is_multicast_ether_addr(addr) ||
- (is_unicast_ether_addr(addr) && hw->ucast_shared))
- ice_remove_mac_entry(hw, m_list_itr);
- }
-
- /* Exit if no unicast addresses found. Multicast switch rules
- * were added individually
- */
- if (!num_unicast)
- return 0;
-
- /* Allocate switch rule buffer for the bulk update for unicast */
- s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
- GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
-
- if (is_unicast_ether_addr(addr)) {
- m_entry = ice_find_mac_entry(hw, addr);
- if (!m_entry) {
- status = ICE_ERR_DOES_NOT_EXIST;
- goto ice_remove_mac_exit;
- }
-
- ice_fill_sw_rule(hw, &m_entry->fltr_info,
- r_iter, ice_aqc_opc_remove_sw_rules);
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
- }
-
- /* Call AQ bulk switch rule update for all unicast addresses */
- r_iter = s_rule;
- /* Call AQ switch rule in AQ_MAX chunk */
- for (total_elem_left = num_unicast; total_elem_left > 0;
- total_elem_left -= elem_sent) {
- struct ice_aqc_sw_rules_elem *entry = r_iter;
-
- elem_sent = min(total_elem_left,
- (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
- status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
- elem_sent, ice_aqc_opc_remove_sw_rules,
- NULL);
- if (status)
- break;
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
-
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
-
- if (is_unicast_ether_addr(addr)) {
- m_entry = ice_find_mac_entry(hw, addr);
- if (!m_entry)
- return ICE_ERR_OUT_OF_RANGE;
- mutex_lock(&sw->mac_list_lock);
- list_del(&m_entry->list_entry);
- mutex_unlock(&sw->mac_list_lock);
- devm_kfree(ice_hw_to_dev(hw), m_entry);
- }
- }
-
-ice_remove_mac_exit:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- return status;
-}
-
-/**
- * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default
- * VSI for the switch (represented by swid)
+ * ice_cfg_dflt_vsi - change state of VSI to set/clear default
* @hw: pointer to the hardware structure
* @vsi_id: number of VSI to set as default
* @set: true to add the above mentioned switch rule, false to remove it
* @direction: ICE_FLTR_RX or ICE_FLTR_TX
+ *
+ * Add a filter rule to set/unset the given VSI as the default VSI for
+ * the switch (represented by the swid)
*/
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
@@ -1704,26 +1856,38 @@ out:
}
/**
- * ice_remove_vlan_internal - Remove one VLAN based filter rule
+ * ice_remove_mac - remove a MAC address based filter rule
* @hw: pointer to the hardware structure
- * @f_entry: filter entry containing one VLAN information
+ * @m_list: list of MAC addresses and forwarding information
+ *
+ * This function removes either a MAC filter rule or a specific VSI from a
+ * VSI list for a multicast MAC address.
+ *
+ * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
+ * ice_add_mac. Caller should be aware that this call will only work if all
+ * the entries passed into m_list were added previously. It will not attempt to
+ * do a partial remove of entries that were found.
*/
-static enum ice_status
-ice_remove_vlan_internal(struct ice_hw *hw,
- struct ice_fltr_list_entry *f_entry)
+enum ice_status
+ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_fltr_info *new_fltr;
- struct ice_fltr_mgmt_list_entry *v_list_elem;
- u16 vsi_id;
+ struct ice_fltr_list_entry *list_itr;
- new_fltr = &f_entry->fltr_info;
-
- v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id);
- if (!v_list_elem)
+ if (!m_list)
return ICE_ERR_PARAM;
- vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
- return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem);
+ list_for_each_entry(list_itr, m_list, list_entry) {
+ enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+ list_itr->status = ice_remove_rule_internal(hw,
+ ICE_SW_LKUP_MAC,
+ list_itr);
+ if (list_itr->status)
+ return list_itr->status;
+ }
+ return 0;
}
/**
@@ -1735,20 +1899,78 @@ enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr;
- enum ice_status status = 0;
if (!v_list || !hw)
return ICE_ERR_PARAM;
list_for_each_entry(v_list_itr, v_list, list_entry) {
- status = ice_remove_vlan_internal(hw, v_list_itr);
- if (status) {
- v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
- return status;
- }
- v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_VLAN)
+ return ICE_ERR_PARAM;
+ v_list_itr->status = ice_remove_rule_internal(hw,
+ ICE_SW_LKUP_VLAN,
+ v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
}
- return status;
+ return 0;
+}
+
+/**
+ * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
+ * @fm_entry: filter entry to inspect
+ * @vsi_id: ID of VSI to compare with filter info
+ */
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_id)
+{
+ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ fm_entry->fltr_info.fwd_id.vsi_id == vsi_id) ||
+ (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map))));
+}
+
+/**
+ * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
+ * @hw: pointer to the hardware structure
+ * @vsi_id: ID of VSI to remove filters from
+ * @vsi_list_head: pointer to the list to add entry to
+ * @fi: pointer to fltr_info of filter entry to copy & add
+ *
+ * Helper function, used when creating a list of filters to remove from
+ * a specific VSI. The entry added to vsi_list_head is a COPY of the
+ * original filter entry, with the exception of fltr_info.fltr_act and
+ * fltr_info.fwd_id fields. These are set such that later logic can
+ * extract which VSI to remove the filter from, and pass that information on.
+ */
+static enum ice_status
+ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
+ struct list_head *vsi_list_head,
+ struct ice_fltr_info *fi)
+{
+ struct ice_fltr_list_entry *tmp;
+
+ /* this memory is freed up in the caller function
+ * once filters for this VSI are removed
+ */
+ tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ tmp->fltr_info = *fi;
+
+	/* Overwrite these fields to indicate which VSI to remove the filter
+	 * from, so the find-and-remove logic can extract the information from
+	 * the list entries. Note that the original entries still hold their
+	 * proper values.
+ */
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.fwd_id.vsi_id = vsi_id;
+
+ list_add(&tmp->list_entry, vsi_list_head);
+
+ return 0;
}
/**
@@ -1757,6 +1979,12 @@ ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
* @vsi_id: ID of VSI to remove filters from
* @lkup_list_head: pointer to the list that has certain lookup type filters
* @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
+ *
+ * Locates all filters in lkup_list_head that are used by the given VSI,
+ * and adds COPIES of those entries to vsi_list_head (intended to be used
+ * to remove the listed filters).
+ * Note that this means all entries in vsi_list_head must be explicitly
+ * deallocated by the caller when done with the list.
*/
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
@@ -1764,46 +1992,25 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
struct list_head *vsi_list_head)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
+ enum ice_status status = 0;
/* check to make sure VSI id is valid and within boundary */
- if (vsi_id >=
- (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1))
+ if (vsi_id >= ICE_MAX_VSI)
return ICE_ERR_PARAM;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
struct ice_fltr_info *fi;
fi = &fm_entry->fltr_info;
- if ((fi->fltr_act == ICE_FWD_TO_VSI &&
- fi->fwd_id.vsi_id == vsi_id) ||
- (fi->fltr_act == ICE_FWD_TO_VSI_LIST &&
- (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) {
- struct ice_fltr_list_entry *tmp;
-
- /* this memory is freed up in the caller function
- * ice_remove_vsi_lkup_fltr() once filters for
- * this VSI are removed
- */
- tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp),
- GFP_KERNEL);
- if (!tmp)
- return ICE_ERR_NO_MEMORY;
-
- memcpy(&tmp->fltr_info, fi, sizeof(*fi));
+ if (!ice_vsi_uses_fltr(fm_entry, vsi_id))
+ continue;
- /* Expected below fields to be set to ICE_FWD_TO_VSI and
- * the particular VSI id since we are only removing this
- * one VSI
- */
- if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) {
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.fwd_id.vsi_id = vsi_id;
- }
-
- list_add(&tmp->list_entry, vsi_list_head);
- }
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_id,
+ vsi_list_head, fi);
+ if (status)
+ return status;
}
- return 0;
+ return status;
}
/**
@@ -1819,46 +2026,40 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_list_entry *fm_entry;
struct list_head remove_list_head;
+ struct list_head *rule_head;
struct ice_fltr_list_entry *tmp;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status;
INIT_LIST_HEAD(&remove_list_head);
+ rule_lock = &sw->recp_list[lkup].filt_rule_lock;
+ rule_head = &sw->recp_list[lkup].filt_rules;
+ mutex_lock(rule_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_id, rule_head,
+ &remove_list_head);
+ mutex_unlock(rule_lock);
+ if (status)
+ return;
+
switch (lkup) {
case ICE_SW_LKUP_MAC:
- mutex_lock(&sw->mac_list_lock);
- status = ice_add_to_vsi_fltr_list(hw, vsi_id,
- &sw->mac_list_head,
- &remove_list_head);
- mutex_unlock(&sw->mac_list_lock);
- if (!status) {
- ice_remove_mac(hw, &remove_list_head);
- goto free_fltr_list;
- }
+ ice_remove_mac(hw, &remove_list_head);
break;
case ICE_SW_LKUP_VLAN:
- mutex_lock(&sw->vlan_list_lock);
- status = ice_add_to_vsi_fltr_list(hw, vsi_id,
- &sw->vlan_list_head,
- &remove_list_head);
- mutex_unlock(&sw->vlan_list_lock);
- if (!status) {
- ice_remove_vlan(hw, &remove_list_head);
- goto free_fltr_list;
- }
+ ice_remove_vlan(hw, &remove_list_head);
break;
case ICE_SW_LKUP_MAC_VLAN:
case ICE_SW_LKUP_ETHERTYPE:
case ICE_SW_LKUP_ETHERTYPE_MAC:
case ICE_SW_LKUP_PROMISC:
- case ICE_SW_LKUP_PROMISC_VLAN:
case ICE_SW_LKUP_DFLT:
- ice_debug(hw, ICE_DBG_SW,
- "Remove filters for this lookup type hasn't been implemented yet\n");
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ case ICE_SW_LKUP_LAST:
+ default:
+ ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
break;
}
- return;
-free_fltr_list:
list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
list_del(&fm_entry->list_entry);
devm_kfree(ice_hw_to_dev(hw), fm_entry);
@@ -1881,3 +2082,89 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
}
+
+/**
+ * ice_replay_fltr - Replay all the filters stored in a specific list head
+ * @hw: pointer to the hardware structure
+ * @recp_id: recipe ID for which rules need to be replayed
+ * @list_head: list for which filters need to be replayed
+ */
+static enum ice_status
+ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct list_head *list_head)
+{
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct list_head l_head;
+ enum ice_status status = 0;
+
+ if (list_empty(list_head))
+ return status;
+
+ /* Move entries from the given list_head to a temporary l_head so that
+	 * they can be replayed. Otherwise, when trying to re-add the same
+	 * filter, the add path would return "already exists"
+ */
+ list_replace_init(list_head, &l_head);
+
+	/* The given list_head was reinitialized above, so the add handlers
+	 * can record the replayed filters on it again
+	 */
+ list_for_each_entry(itr, &l_head, list_entry) {
+ struct ice_fltr_list_entry f_entry;
+
+ f_entry.fltr_info = itr->fltr_info;
+ if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
+ status = ice_add_rule_internal(hw, recp_id, &f_entry);
+ if (status)
+ goto end;
+ continue;
+ }
+
+		/* Add a filter per VSI separately */
+ while (1) {
+ u16 vsi;
+
+ vsi = find_first_bit(itr->vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (vsi == ICE_MAX_VSI)
+ break;
+
+ clear_bit(vsi, itr->vsi_list_info->vsi_map);
+ f_entry.fltr_info.fwd_id.vsi_id = vsi;
+ f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ if (recp_id == ICE_SW_LKUP_VLAN)
+ status = ice_add_vlan_internal(hw, &f_entry);
+ else
+ status = ice_add_rule_internal(hw, recp_id,
+ &f_entry);
+ if (status)
+ goto end;
+ }
+ }
+end:
+ /* Clear the filter management list */
+ ice_rem_sw_rule_info(hw, &l_head);
+ return status;
+}
+
+/**
+ * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
+ * @hw: pointer to the hardware structure
+ *
+ * NOTE: This function does not clean up partially added filters on error.
+ * It is up to the caller of the function to issue a reset or fail early.
+ */
+enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ enum ice_status status = 0;
+ u8 i;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ struct list_head *head = &sw->recp_list[i].filt_rules;
+
+ status = ice_replay_fltr(hw, i, head);
+ if (status)
+ return status;
+ }
+ return status;
+}
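A hedged sketch of where the replay fits: after a reset has reinitialized the switch, the bookkept rules are pushed back to hardware. The wrapper below is illustrative, not the driver's actual rebuild flow:

/* Illustrative rebuild step: replay all bookkept filter rules after the
 * switch has been reinitialized. Per the note above, a failure here does
 * not clean up partially added filters, so the caller should escalate
 * (e.g. request a deeper reset) instead of continuing.
 */
static int ice_replay_fltrs_after_reset(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_replay_all_fltr(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "filter replay failed %d\n",
			  status);
		return -EIO;
	}
	return 0;
}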
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 9b8ec128ee31..646389ca1238 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -39,6 +39,7 @@ enum ice_sw_lkup_type {
ICE_SW_LKUP_DFLT = 5,
ICE_SW_LKUP_ETHERTYPE_MAC = 8,
ICE_SW_LKUP_PROMISC_VLAN = 9,
+ ICE_SW_LKUP_LAST
};
struct ice_fltr_info {
@@ -98,6 +99,31 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_sw_recipe {
+ struct list_head l_entry;
+
+	/* To protect modification of the filt_rules list
+	 * defined below
+ */
+ struct mutex filt_rule_lock;
+
+ /* List of type ice_fltr_mgmt_list_entry */
+ struct list_head filt_rules;
+
+ /* linked list of type recipe_list_entry */
+ struct list_head rg_list;
+	/* linked list of type ice_sw_fv_list_entry */
+ struct list_head fv_list;
+ struct ice_aqc_recipe_data_elem *r_buf;
+ u8 recp_count;
+ u8 root_rid;
+ u8 num_profs;
+ u8 *prof_ids;
+
+	/* recipe bitmap: which recipes make up this recipe */
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+};
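This per-recipe filt_rule_lock replaces the per-list mutexes dropped from ice_switch_info in ice_type.h below. A minimal sketch of the resulting locking pattern, with ice_count_rules() as a hypothetical example:

/* Hypothetical example: walk one lookup type's bookkeeping list under
 * its own filt_rule_lock, the pattern all filt_rules traversals use
 * after this rework.
 */
static u16 ice_count_rules(struct ice_hw *hw, enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *itr;
	u16 count = 0;

	mutex_lock(&sw->recp_list[lkup].filt_rule_lock);
	list_for_each_entry(itr, &sw->recp_list[lkup].filt_rules, list_entry)
		count++;
	mutex_unlock(&sw->recp_list[lkup].filt_rule_lock);

	return count;
}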
+
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
struct ice_vsi_list_map_info {
struct list_head list_entry;
@@ -105,15 +131,9 @@ struct ice_vsi_list_map_info {
u16 vsi_list_id;
};
-enum ice_sw_fltr_status {
- ICE_FLTR_STATUS_NEW = 0,
- ICE_FLTR_STATUS_FW_SUCCESS,
- ICE_FLTR_STATUS_FW_FAIL,
-};
-
struct ice_fltr_list_entry {
struct list_head list_entry;
- enum ice_sw_fltr_status status;
+ enum ice_status status;
struct ice_fltr_info fltr_info;
};
@@ -138,18 +158,18 @@ struct ice_fltr_mgmt_list_entry {
/* VSI related commands */
enum ice_status
-ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- struct ice_sq_cd *cd);
-enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
enum ice_status
-ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- bool keep_vsi_alloc, struct ice_sq_cd *cd);
-
+ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd);
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
/* Switch/bridge related commands */
+enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id);
@@ -158,4 +178,8 @@ enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction);
+enum ice_status ice_replay_all_fltr(struct ice_hw *hw);
+
+enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
+
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 6481e3d86374..5dae968d853e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -251,6 +251,7 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->tx_stats.prev_pkt = -1;
return 0;
err:
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 31bc998fe200..839fd9ff6043 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -71,6 +71,7 @@ struct ice_txq_stats {
u64 restart_q;
u64 tx_busy;
u64 tx_linearize;
+ int prev_pkt; /* negative if no pending Tx descriptors */
};
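prev_pkt backs a Tx-hang heuristic: if descriptors are still pending but the ring's completed-packet count has not moved since the last check, the queue is likely stalled. A sketch of that idea, assuming the ring keeps a completed-packet counter (stats.pkts here); the real check lives in the driver's hang-detection path and may differ in detail:

/* Sketch of the prev_pkt heuristic. A negative prev_pkt means no
 * descriptors were pending at the last check, so no comparison is made.
 */
static bool ice_ring_maybe_stalled(struct ice_ring *tx_ring, u16 pending)
{
	/* mask to fit the int-sized prev_pkt field */
	int pkts = tx_ring->stats.pkts & INT_MAX;

	if (pending && tx_ring->tx_stats.prev_pkt >= 0 &&
	    tx_ring->tx_stats.prev_pkt == pkts)
		return true;

	/* remember the count only while descriptors remain pending */
	tx_ring->tx_stats.prev_pkt = pending ? pkts : -1;
	return false;
}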
struct ice_rxq_stats {
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 97c366e0ca59..e681804be4d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -34,10 +34,15 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
enum ice_aq_res_ids {
ICE_NVM_RES_ID = 1,
ICE_SPD_RES_ID,
- ICE_GLOBAL_CFG_LOCK_RES_ID,
- ICE_CHANGE_LOCK_RES_ID
+ ICE_CHANGE_LOCK_RES_ID,
+ ICE_GLOBAL_CFG_LOCK_RES_ID
};
+/* FW update timeout definitions are in milliseconds */
+#define ICE_NVM_TIMEOUT 180000
+#define ICE_CHANGE_LOCK_TIMEOUT 1000
+#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
ICE_RES_WRITE
@@ -144,9 +149,10 @@ struct ice_mac_info {
/* Various RESET request, These are not tied with HW reset types */
enum ice_reset_req {
- ICE_RESET_PFR = 0,
- ICE_RESET_CORER = 1,
- ICE_RESET_GLOBR = 2,
+ ICE_RESET_INVAL = 0,
+ ICE_RESET_PFR = 1,
+ ICE_RESET_CORER = 2,
+ ICE_RESET_GLOBR = 3,
};
/* Bus parameters */
@@ -204,6 +210,7 @@ enum ice_agg_type {
};
#define ICE_SCHED_DFLT_RL_PROF_ID 0
+#define ICE_SCHED_DFLT_BW_WT 1
/* vsi type list entry to locate corresponding vsi/ag nodes */
struct ice_sched_vsi_info {
@@ -247,19 +254,26 @@ struct ice_port_info {
};
struct ice_switch_info {
- /* Switch VSI lists to MAC/VLAN translation */
- struct mutex mac_list_lock; /* protect MAC list */
- struct list_head mac_list_head;
- struct mutex vlan_list_lock; /* protect VLAN list */
- struct list_head vlan_list_head;
- struct mutex eth_m_list_lock; /* protect ethtype list */
- struct list_head eth_m_list_head;
- struct mutex promisc_list_lock; /* protect promisc mode list */
- struct list_head promisc_list_head;
- struct mutex mac_vlan_list_lock; /* protect MAC-VLAN list */
- struct list_head mac_vlan_list_head;
-
struct list_head vsi_list_map_head;
+ struct ice_sw_recipe *recp_list;
+};
+
+/* FW logging configuration */
+struct ice_fw_log_evnt {
+ u8 cfg : 4; /* New event enables to configure */
+ u8 cur : 4; /* Current/active event enables */
+};
+
+struct ice_fw_log_cfg {
+ u8 cq_en : 1; /* FW logging is enabled via the control queue */
+ u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */
+	u8 actv_evnts; /* Cumulative mask of currently enabled log events */
+
+#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S)
+#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S)
+#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S)
+#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S)
+ struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX];
};
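actv_evnts holds the OR of the per-module cur enables, so a quick non-zero test avoids scanning the evnts array. A small sketch of recomputing it after a configuration change; the helper name is illustrative:

/* Hypothetical helper: recompute the cumulative active-event mask from
 * the per-module current enables.
 */
static void ice_fw_log_update_actv_evnts(struct ice_fw_log_cfg *cfg)
{
	u16 i;

	cfg->actv_evnts = 0;
	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
		cfg->actv_evnts |= cfg->evnts[i].cur;
}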
/* Port hardware description */
@@ -286,8 +300,11 @@ struct ice_hw {
u8 flattened_layers;
u8 max_cgds;
u8 sw_entry_point_layer;
+ u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
+ u8 reset_ongoing; /* true if hw is in reset, false otherwise */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -308,6 +325,7 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
+ struct ice_fw_log_cfg fw_log;
/* minimum allowed value for different speeds */
#define ICE_ITR_GRAN_MIN_200 1
#define ICE_ITR_GRAN_MIN_100 1