Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 923 ++++++++++------
 1 file changed, 734 insertions(+), 189 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 59ed806a52c3..0874acf5ef39 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -17,10 +17,11 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-
+#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
+#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"
@@ -981,6 +982,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_DEFAULT_SPEED_M,
HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
+
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
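The hnae_get_field() calls above extract packed sub-fields (default speed, maximum RSS size) from a 32-bit firmware parameter word using a mask and a shift. A minimal standalone sketch of that mask/shift idiom, using assumed field positions rather than the driver's real HCLGE_CFG_RSS_SIZE_M/S values:

#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: bits 24..31 of param[3] hold rss_size_max */
#define CFG_RSS_SIZE_MASK  0xFF000000u
#define CFG_RSS_SIZE_SHIFT 24

static uint32_t get_field(uint32_t word, uint32_t mask, unsigned int shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	uint32_t param3 = 0x40000305; /* example firmware word */

	printf("rss_size_max = %u\n",
	       get_field(param3, CFG_RSS_SIZE_MASK, CFG_RSS_SIZE_SHIFT));
	return 0;
}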
@@ -1058,7 +1063,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
- hdev->rss_size_max = 1;
+ hdev->rss_size_max = cfg.rss_size_max;
hdev->rx_buf_len = cfg.rx_buf_len;
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
@@ -1095,10 +1100,7 @@ static int hclge_configure(struct hclge_dev *hdev)
for (i = 0; i < hdev->tm_info.num_tc; i++)
hnae_set_bit(hdev->hw_tc_map, i, 1);
- if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
- else
- hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
return ret;
}
@@ -2132,28 +2134,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
return 0;
}
-static int hclge_query_autoneg_result(struct hclge_dev *hdev)
-{
- struct hclge_mac *mac = &hdev->hw.mac;
- struct hclge_query_an_speed_dup_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "autoneg result query cmd failed %d.\n", ret);
- return ret;
- }
-
- mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
-
- return 0;
-}
-
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
struct hclge_config_auto_neg_cmd *req;
@@ -2189,15 +2169,42 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
- hclge_query_autoneg_result(hdev);
+ if (phydev)
+ return phydev->autoneg;
return hdev->hw.mac.autoneg;
}
+static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
+ bool mask_vlan,
+ u8 *mac_mask)
+{
+ struct hclge_mac_vlan_mask_entry_cmd *req;
+ struct hclge_desc desc;
+ int status;
+
+ req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
+
+ hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+ mask_vlan ? 1 : 0);
+ ether_addr_copy(req->mac_mask, mac_mask);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_mac_init(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
+ u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
int ret;
ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2223,7 +2230,31 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
- return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mta filter mode fail ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "set default mac_vlan_mask fail ret=%d\n", ret);
+
+ return ret;
+}
+
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->rst_service_task);
}
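hclge_mbx_task_schedule() and hclge_reset_task_schedule() use test_and_set_bit() so each task is queued at most once; the worker clears the *_SERVICE_SCHED bit before it runs, letting new events re-arm the task (see hclge_reset_service_task() and hclge_mailbox_service_task() below). A minimal userspace sketch of the same schedule-once idiom, assuming C11 atomics and a trivial stand-in for schedule_work():

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag task_scheduled = ATOMIC_FLAG_INIT;
static void (*pending_work)(void); /* stand-in for a workqueue slot */

static void service_task(void)
{
	/* clear the SCHED flag before working, so an event arriving
	 * while we run can re-arm the task instead of being lost
	 */
	atomic_flag_clear(&task_scheduled);
	puts("handling queued events");
}

static void task_schedule(void)
{
	/* only the caller that flips the flag 0 -> 1 queues the work,
	 * mirroring test_and_set_bit() + schedule_work()
	 */
	if (!atomic_flag_test_and_set(&task_scheduled))
		pending_work = service_task;
}

int main(void)
{
	task_schedule();
	task_schedule();        /* no-op: already scheduled */
	if (pending_work)
		pending_work(); /* worker runs, clears the flag first */
	task_schedule();        /* can be scheduled again now */
	if (pending_work)
		pending_work();
	return 0;
}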
static void hclge_task_schedule(struct hclge_dev *hdev)
@@ -2362,6 +2393,64 @@ static void hclge_service_complete(struct hclge_dev *hdev)
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+ u32 rst_src_reg;
+ u32 cmdq_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+
+ /* Assumption: if reset and mailbox events happen to be reported
+ * together, only the reset event is processed in this pass and the
+ * mailbox events are deferred. Since the RX CMDQ event has not been
+ * cleared this time, the hardware will raise another interrupt just
+ * for the mailbox.
+ */
+
+ /* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGE_VECTOR0_EVENT_MBX;
+ }
+
+ return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ u32 regclr)
+{
+ switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+ break;
+ }
+}
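hclge_check_event_cause() gives reset sources priority over the mailbox and leaves the RX CMDQ bit uncleared when a reset wins, so the hardware re-raises an interrupt for the deferred mailbox work. A condensed standalone sketch of that decode-and-defer logic, with assumed bit positions and the three reset sources collapsed into one mask:

#include <stdint.h>
#include <stdio.h>

enum event { EVENT_RST, EVENT_MBX, EVENT_OTHER };

#define RST_SRC_MASK 0x7u      /* assumed: global/core/IMP reset bits */
#define CMDQ_RX_BIT  (1u << 1) /* assumed RX CMDQ bit position */

static enum event check_event_cause(uint32_t rst_src, uint32_t cmdq_src,
				    uint32_t *clearval)
{
	/* reset wins; the RX CMDQ bit is left untouched so the hardware
	 * interrupts again later just for the mailbox
	 */
	if (rst_src & RST_SRC_MASK) {
		*clearval = rst_src & RST_SRC_MASK;
		return EVENT_RST;
	}

	if (cmdq_src & CMDQ_RX_BIT) {
		/* mirror the driver: hand back the source word with the
		 * RX bit masked off
		 */
		*clearval = cmdq_src & ~CMDQ_RX_BIT;
		return EVENT_MBX;
	}

	return EVENT_OTHER;
}

int main(void)
{
	uint32_t clearval = 0;

	/* reset and mailbox reported together: only the reset is returned */
	enum event ev = check_event_cause(0x1, CMDQ_RX_BIT, &clearval);

	printf("event=%d clearval=0x%x\n", (int)ev, (unsigned int)clearval);
	return 0;
}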
+
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
writel(enable ? 1 : 0, vector->addr);
@@ -2370,10 +2459,38 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ u32 event_cause;
+ u32 clearval;
hclge_enable_vector(&hdev->misc_vector, false);
- if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->service_task);
+ event_cause = hclge_check_event_cause(hdev, &clearval);
+
+ /* vector 0 interrupt is shared with reset and mailbox source events. */
+ switch (event_cause) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_reset_task_schedule(hdev);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ /* If we get here, either no mailbox task is running or scheduled,
+ * or one is running but nothing further is scheduled. In both
+ * cases, schedule the mailbox task: this interrupt means more
+ * mailbox messages have arrived.
+ */
+ hclge_mbx_task_schedule(hdev);
+ break;
+ default:
+ dev_dbg(&hdev->pdev->dev,
+ "received unknown or unhandled event of vector0\n");
+ break;
+ }
+
+ /* clear the interrupt source before re-enabling vector0 */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+ hclge_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
@@ -2404,9 +2521,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
hclge_get_misc_vector(hdev);
- ret = devm_request_irq(&hdev->pdev->dev,
- hdev->misc_vector.vector_irq,
- hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+ /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
+ ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+ 0, "hclge_misc", hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -2416,6 +2533,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
return ret;
}
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclge_free_vector(hdev, 0);
+}
+
static int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
@@ -2471,12 +2594,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
cnt++;
}
- /* must clear reset status register to
- * prevent driver detect reset interrupt again
- */
- reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
-
if (cnt >= HCLGE_RESET_WAIT_CNT) {
dev_warn(&hdev->pdev->dev,
"Wait for reset timeout: %d\n", hdev->reset_type);
@@ -2505,12 +2622,12 @@ static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
return ret;
}
-static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+static void hclge_do_reset(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
u32 val;
- switch (type) {
+ switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
@@ -2526,30 +2643,62 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
hclge_func_reset_cmd(hdev, 0);
+ /* schedule again to check later */
+ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
- "Unsupported reset type: %d\n", type);
+ "Unsupported reset type: %d\n", hdev->reset_type);
break;
}
}
-static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+ unsigned long *addr)
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
- u32 rst_reg_val;
- rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_GLOBAL_RESET, addr))
rst_level = HNAE3_GLOBAL_RESET;
- else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_CORE_RESET, addr))
rst_level = HNAE3_CORE_RESET;
- else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_IMP_RESET, addr))
rst_level = HNAE3_IMP_RESET;
+ else if (test_bit(HNAE3_FUNC_RESET, addr))
+ rst_level = HNAE3_FUNC_RESET;
+
+ /* now, clear all other resets */
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
return rst_level;
}
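hclge_get_reset_level() returns the most severe pending reset (checked in the order global, core, IMP, function) and then clears every pending bit, on the reasoning that the larger reset subsumes the smaller ones. A minimal sketch of that pick-highest-then-clear policy over a plain bitmap, with assumed bit positions:

#include <stdio.h>

enum reset_level {
	RESET_NONE, RESET_FUNC, RESET_IMP, RESET_CORE, RESET_GLOBAL
};

static enum reset_level get_reset_level(unsigned long *pending)
{
	enum reset_level level = RESET_NONE;

	/* highest severity wins, same check order as the driver */
	if (*pending & (1ul << RESET_GLOBAL))
		level = RESET_GLOBAL;
	else if (*pending & (1ul << RESET_CORE))
		level = RESET_CORE;
	else if (*pending & (1ul << RESET_IMP))
		level = RESET_IMP;
	else if (*pending & (1ul << RESET_FUNC))
		level = RESET_FUNC;

	/* the chosen reset subsumes the smaller ones, so drop them all */
	*pending = 0;
	return level;
}

int main(void)
{
	unsigned long pending = (1ul << RESET_FUNC) | (1ul << RESET_CORE);

	printf("level=%d\n", get_reset_level(&pending)); /* RESET_CORE */
	printf("pending=%lu\n", pending);                /* 0 */
	return 0;
}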
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ /* perform reset of the stack & ae device for a client */
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+ if (!hclge_reset_wait(hdev)) {
+ rtnl_lock();
+ hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ hclge_reset_ae_dev(hdev->ae_dev);
+ hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ rtnl_unlock();
+ } else {
+ /* schedule again to check pending resets later */
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ }
+
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
static void hclge_reset_event(struct hnae3_handle *handle,
enum hnae3_reset_type reset)
{
@@ -2563,14 +2712,9 @@ static void hclge_reset_event(struct hnae3_handle *handle,
case HNAE3_FUNC_RESET:
case HNAE3_CORE_RESET:
case HNAE3_GLOBAL_RESET:
- if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
- dev_err(&hdev->pdev->dev, "Already in reset state");
- return;
- }
- hdev->reset_type = reset;
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->service_task);
+ /* request reset & schedule reset task */
+ set_bit(reset, &hdev->reset_request);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
@@ -2580,49 +2724,55 @@ static void hclge_reset_event(struct hnae3_handle *handle,
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
- bool do_reset;
+ /* Check whether there is any ongoing reset in the hardware; this
+ * status can be read from reset_pending. If there is, we must wait
+ * for the hardware to complete the reset.
+ * a. If we can determine within a reasonable time that the hardware
+ * has fully reset, proceed with the driver and client reset.
+ * b. Otherwise, reschedule the reset task now and check the status
+ * again later.
+ */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_reset(hdev);
- do_reset = hdev->reset_type != HNAE3_NONE_RESET;
+ /* check if we got any *new* reset requests to be honored */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_do_reset(hdev);
- /* Reset is detected by interrupt */
- if (hdev->reset_type == HNAE3_NONE_RESET)
- hdev->reset_type = hclge_detected_reset_event(hdev);
+ hdev->reset_type = HNAE3_NONE_RESET;
+}
- if (hdev->reset_type == HNAE3_NONE_RESET)
+static void hclge_reset_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, rst_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
return;
- switch (hdev->reset_type) {
- case HNAE3_FUNC_RESET:
- case HNAE3_CORE_RESET:
- case HNAE3_GLOBAL_RESET:
- case HNAE3_IMP_RESET:
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
- if (do_reset)
- hclge_do_reset(hdev, hdev->reset_type);
- else
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+ hclge_reset_subtask(hdev);
- if (!hclge_reset_wait(hdev)) {
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- }
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- break;
- default:
- dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
- hdev->reset_type);
- break;
- }
- hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}
-static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
+static void hclge_mailbox_service_task(struct work_struct *work)
{
- hclge_reset_subtask(hdev);
- hclge_enable_vector(&hdev->misc_vector, true);
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, mbx_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_service_task(struct work_struct *work)
@@ -2630,7 +2780,6 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
- hclge_misc_irq_service_task(hdev);
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
hclge_update_stats_for_all(hdev);
@@ -3174,49 +3323,48 @@ err:
return ret;
}
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
- struct hnae3_ring_chain_node *ring_chain)
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- int ret;
+ struct hclge_ctrl_vector_chain_cmd *req
+ = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ enum hclge_cmd_status status;
+ enum hclge_opcode_type op;
+ u16 tqp_type_and_id;
int i;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+ hclge_cmd_setup_basic_desc(&desc, op, false);
req->int_vector_id = vector_id;
i = 0;
for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
+ hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+ req->vfid = vport->vport_id;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
"Map TQP fail, status is %d.\n",
- ret);
- return ret;
+ status);
+ return -EIO;
}
i = 0;
hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_ADD_RING_TO_VECTOR,
+ op,
false);
req->int_vector_id = vector_id;
}
@@ -3224,21 +3372,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
if (i > 0) {
req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ req->vfid = vport->vport_id;
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
- "Map TQP fail, status is %d.\n", ret);
- return ret;
+ "Map TQP fail, status is %d.\n", status);
+ return -EIO;
}
}
return 0;
}
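hclge_bind_ring_with_vector() fills a fixed-size command descriptor, flushes it each time HCLGE_VECTOR_ELEMENTS_PER_CMD entries accumulate, and sends one final partial descriptor for any remainder. A standalone sketch of that fill/flush batching pattern; the element count and send routine are assumptions:

#include <stdint.h>
#include <stdio.h>

#define ELEMS_PER_CMD 10 /* assumed stand-in for HCLGE_VECTOR_ELEMENTS_PER_CMD */

/* stand-in for hclge_cmd_send(): 0 means success */
static int send_cmd(const uint16_t *elems, int n)
{
	(void)elems;
	printf("sending descriptor with %d element(s)\n", n);
	return 0;
}

static int bind_rings(const uint16_t *rings, int count)
{
	uint16_t buf[ELEMS_PER_CMD];
	int i = 0;

	for (int r = 0; r < count; r++) {
		buf[i] = rings[r];
		/* descriptor full: flush it and start a new one */
		if (++i >= ELEMS_PER_CMD) {
			int ret = send_cmd(buf, i);

			if (ret)
				return ret;
			i = 0;
		}
	}

	/* send the trailing partial descriptor, if any */
	if (i > 0)
		return send_cmd(buf, i);

	return 0;
}

int main(void)
{
	uint16_t rings[23];

	for (int r = 0; r < 23; r++)
		rings[r] = (uint16_t)r;

	return bind_rings(rings, 23); /* flushes 10 + 10, then sends 3 */
}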
-static int hclge_map_handle_ring_to_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -3247,24 +3395,20 @@ static int hclge_map_handle_ring_to_vector(
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. ret =%d\n", vector_id);
+ "Get vector index fail. vector_id =%d\n", vector_id);
return vector_id;
}
- return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
+ return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
-static int hclge_unmap_ring_from_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
- struct hnae3_ring_chain_node *node;
- struct hclge_desc desc;
- int i, vector_id;
- int ret;
+ int vector_id, ret;
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
@@ -3273,54 +3417,17 @@ static int hclge_unmap_ring_from_vector(
return vector_id;
}
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
- req->int_vector_id = vector_id;
-
- i = 0;
- for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
- if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
- req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n",
- ret);
- return ret;
- }
- i = 0;
- hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_DEL_RING_TO_VECTOR,
- false);
- req->int_vector_id = vector_id;
- }
+ ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+ vector_id,
+ ret);
+ return ret;
}
- if (i > 0) {
- req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n", ret);
- return ret;
- }
- }
+ /* Free this MSIX or MSI vector */
+ hclge_free_vector(hdev, vector_id);
return 0;
}
@@ -4250,27 +4357,185 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+ req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
+ vcfg->accept_tag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
+ vcfg->accept_untag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port txvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port rxvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
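Both offload commands select one vport out of many by setting a single bit in the descriptor's vf_bitmap. A minimal sketch of the byte-granular bitmap idiom; the array size and VF_NUM_PER_BYTE value are assumptions, and note the driver above derives the byte offset from HCLGE_VF_NUM_PER_CMD rather than HCLGE_VF_NUM_PER_BYTE:

#include <stdint.h>
#include <stdio.h>

#define VF_NUM_PER_BYTE 8 /* assumed, like HCLGE_VF_NUM_PER_BYTE */

int main(void)
{
	uint8_t vf_bitmap[8] = {0}; /* one bit per VF/vport in the command */
	unsigned int vport_id = 13;

	/* byte index first, then the bit within that byte */
	vf_bitmap[vport_id / VF_NUM_PER_BYTE] |=
		(uint8_t)(1u << (vport_id % VF_NUM_PER_BYTE));

	for (int i = 0; i < 8; i++)
		printf("vf_bitmap[%d] = 0x%02x\n", i, vf_bitmap[i]);
	return 0;
}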
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+ struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+ struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+ rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+ rx_req->ot_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+ rx_req->ot_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+ rx_req->in_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+ rx_req->in_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Send rxvlan protocol type command fail, ret =%d\n",
+ status);
+ return status;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+ tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+ tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send txvlan protocol type command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
-#define HCLGE_VLAN_TYPE_VF_TABLE 0
-#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_DEF_VLAN_TYPE 0x8100
+
struct hnae3_handle *handle;
+ struct hclge_vport *vport;
int ret;
+ int i;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
if (ret)
return ret;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
if (ret)
return ret;
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+ ret = hclge_set_vlan_protocol_type(hdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->txvlan_cfg.accept_tag = true;
+ vport->txvlan_cfg.accept_untag = true;
+ vport->txvlan_cfg.insert_tag1_en = false;
+ vport->txvlan_cfg.insert_tag2_en = false;
+ vport->txvlan_cfg.default_tag1 = 0;
+ vport->txvlan_cfg.default_tag2 = 0;
+
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ ret = hclge_set_vlan_rx_offload_cfg(vport);
+ if (ret)
+ return ret;
+ }
+
handle = &hdev->vport[0].nic;
return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
+static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = enable;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4341,7 +4606,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
-static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4392,6 +4657,103 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
+static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
+ u32 *flowctrl_adv)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
+ (phydev->advertising & ADVERTISED_Asym_Pause);
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ if (rx_en)
+ phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+ if (tx_en)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+}
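hclge_set_flowctrl_adv() encodes the (rx_en, tx_en) pair into the two 802.3 pause advertisement bits with an OR-then-XOR trick: rx_en sets both Pause and Asym_Pause, then tx_en toggles Asym_Pause. A sketch that prints the resulting truth table; the bit values are stand-ins for ADVERTISED_Pause/ADVERTISED_Asym_Pause:

#include <stdio.h>

#define ADV_PAUSE 0x1 /* stand-in for ADVERTISED_Pause */
#define ADV_ASYM  0x2 /* stand-in for ADVERTISED_Asym_Pause */

static unsigned int pause_adv(int rx_en, int tx_en)
{
	unsigned int adv = 0;

	if (rx_en)
		adv |= ADV_PAUSE | ADV_ASYM;
	if (tx_en)
		adv ^= ADV_ASYM;

	return adv;
}

int main(void)
{
	/* rx=0,tx=0 -> none;        rx=1,tx=0 -> Pause|Asym (rx only);
	 * rx=0,tx=1 -> Asym (tx only); rx=1,tx=1 -> Pause (symmetric)
	 */
	for (int rx = 0; rx <= 1; rx++)
		for (int tx = 0; tx <= 1; tx++)
			printf("rx=%d tx=%d adv=0x%x\n",
			       rx, tx, pause_adv(rx, tx));
	return 0;
}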
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ enum hclge_fc_mode fc_mode;
+ int ret;
+
+ if (rx_en && tx_en)
+ fc_mode = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ fc_mode = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ fc_mode = HCLGE_FC_TX_PAUSE;
+ else
+ fc_mode = HCLGE_FC_NONE;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ hdev->fc_mode_last_time = fc_mode;
+ return 0;
+ }
+
+ ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ hdev->tm_info.fc_mode = fc_mode;
+
+ return 0;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u16 remote_advertising = 0;
+ u16 local_advertising = 0;
+ u32 rx_pause, tx_pause;
+ u8 flowctl;
+
+ if (!phydev->link || !phydev->autoneg)
+ return 0;
+
+ if (phydev->advertising & ADVERTISED_Pause)
+ local_advertising = ADVERTISE_PAUSE_CAP;
+
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ local_advertising |= ADVERTISE_PAUSE_ASYM;
+
+ if (phydev->pause)
+ remote_advertising = LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_advertising |= LPA_PAUSE_ASYM;
+
+ flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+ remote_advertising);
+ tx_pause = flowctl & FLOW_CTRL_TX;
+ rx_pause = flowctl & FLOW_CTRL_RX;
+
+ if (phydev->duplex == HCLGE_MAC_HALF) {
+ tx_pause = 0;
+ rx_pause = 0;
+ }
+
+ return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
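mii_resolve_flowctrl_fdx() implements the IEEE 802.3 Annex 28B resolution: symmetric pause when both ends advertise it, otherwise an asymmetric result decided by which side also advertises the pause capability. A standalone sketch of that resolution logic, with simplified stand-ins for the ADVERTISE_*/LPA_*/FLOW_CTRL_* constants:

#include <stdio.h>

#define ADV_PAUSE_CAP  0x1 /* stand-in for ADVERTISE_PAUSE_CAP / LPA_PAUSE_CAP */
#define ADV_PAUSE_ASYM 0x2 /* stand-in for ADVERTISE_PAUSE_ASYM / LPA_PAUSE_ASYM */
#define FLOW_RX 0x1
#define FLOW_TX 0x2

/* resolve full-duplex flow control, mirroring mii_resolve_flowctrl_fdx() */
static unsigned int resolve_flowctrl(unsigned int lcladv, unsigned int rmtadv)
{
	unsigned int cap = 0;

	if (lcladv & rmtadv & ADV_PAUSE_CAP) {
		cap = FLOW_TX | FLOW_RX;         /* symmetric pause */
	} else if (lcladv & rmtadv & ADV_PAUSE_ASYM) {
		if (lcladv & ADV_PAUSE_CAP)      /* local wants rx pause only */
			cap = FLOW_RX;
		else if (rmtadv & ADV_PAUSE_CAP) /* peer wants rx, local sends */
			cap = FLOW_TX;
	}

	return cap;
}

int main(void)
{
	unsigned int cap = resolve_flowctrl(ADV_PAUSE_CAP | ADV_PAUSE_ASYM,
					    ADV_PAUSE_ASYM);

	printf("tx=%d rx=%d\n", !!(cap & FLOW_TX), !!(cap & FLOW_RX));
	return 0;
}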
+
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
u32 *rx_en, u32 *tx_en)
{
@@ -4421,6 +4783,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+ u32 rx_en, u32 tx_en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u32 fc_autoneg;
+
+ /* Only support flow control negotiation for netdev with
+ * phy attached for now.
+ */
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ fc_autoneg = hclge_get_autoneg(handle);
+ if (auto_neg != fc_autoneg) {
+ dev_info(&hdev->pdev->dev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ dev_info(&hdev->pdev->dev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
+
+ hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+ if (!fc_autoneg)
+ return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+ return phy_start_aneg(phydev);
+}
+
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex)
{
@@ -4661,6 +5058,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_request = 0;
+ hdev->reset_pending = 0;
ae_dev->priv = hdev;
ret = hclge_pci_init(hdev);
@@ -4772,12 +5171,18 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
+ INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
+ INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -4889,25 +5294,159 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
+ if (hdev->rst_service_task.func)
+ cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- hclge_free_vector(hdev, 0);
hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
ae_dev->priv = NULL;
}
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ ch->max_combined = hclge_get_max_channels(handle);
+ ch->other_count = 1;
+ ch->max_other = 1;
+ ch->combined_count = vport->alloc_tqps;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+ u16 *free_tqps, u16 *max_rss_size)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 temp_tqps = 0;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ if (!hdev->htqp[i].alloced)
+ temp_tqps++;
+ }
+ *free_tqps = temp_tqps;
+ *max_rss_size = hdev->rss_size_max;
+}
+
+static void hclge_release_tqp(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_tqp, q);
+
+ tqp->q.handle = NULL;
+ tqp->q.tqp_index = 0;
+ tqp->alloced = false;
+ }
+
+ devm_kfree(&hdev->pdev->dev, kinfo->tqp);
+ kinfo->tqp = NULL;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int cur_rss_size = kinfo->rss_size;
+ int cur_tqps = kinfo->num_tqps;
+ u16 tc_offset[HCLGE_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 tc_size[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ u32 *rss_indir;
+ int ret, i;
+
+ hclge_release_tqp(vport);
+
+ ret = hclge_knic_setup(vport, new_tqps_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_map_tqp_to_vport(hdev, vport);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_tm_schd_init(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ roundup_size = roundup_pow_of_two(kinfo->rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = kinfo->rss_size * i;
+ }
+ ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ if (ret)
+ return ret;
+
+ /* Reinitialize the rss indirect table according to the new RSS size */
+ rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!rss_indir)
+ return -ENOMEM;
+
+ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ rss_indir[i] = i % kinfo->rss_size;
+
+ ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+ ret);
+
+ kfree(rss_indir);
+
+ if (!ret)
+ dev_info(&hdev->pdev->dev,
+ "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+ cur_rss_size, kinfo->rss_size,
+ cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+ return ret;
+}
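hclge_set_channels() stores the per-TC RSS size as the log2 of the next power of two (the roundup_pow_of_two() + ilog2() pair above) and refills the indirection table round-robin over the new queue count. A minimal sketch of those two computations; the table size is an assumed stand-in for HCLGE_RSS_IND_TBL_SIZE:

#include <stdint.h>
#include <stdio.h>

#define RSS_IND_TBL_SIZE 512 /* assumed, stands in for HCLGE_RSS_IND_TBL_SIZE */

/* smallest power of two >= rss_size, then its log2: the hw field value */
static unsigned int rss_size_log2(unsigned int rss_size)
{
	unsigned int p = 1, lg = 0;

	while (p < rss_size) {
		p <<= 1;
		lg++;
	}
	return lg;
}

int main(void)
{
	unsigned int rss_size = 6;
	uint32_t rss_indir[RSS_IND_TBL_SIZE];

	printf("tc_size field = %u\n", rss_size_log2(rss_size)); /* 3: 2^3 = 8 */

	/* spread table entries round-robin over the active queues */
	for (int i = 0; i < RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = (uint32_t)(i % rss_size);

	printf("rss_indir[0..7] = %u %u %u %u %u %u %u %u\n",
	       rss_indir[0], rss_indir[1], rss_indir[2], rss_indir[3],
	       rss_indir[4], rss_indir[5], rss_indir[6], rss_indir[7]);
	return 0;
}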
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
- .map_ring_to_vector = hclge_map_handle_ring_to_vector,
- .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
+ .map_ring_to_vector = hclge_map_ring_to_vector,
+ .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
.get_vector = hclge_get_vector,
.set_promisc_mode = hclge_set_promisc_mode,
.set_loopback = hclge_set_loopback,
@@ -4934,6 +5473,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
+ .set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
@@ -4944,7 +5484,12 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_mdix_mode = hclge_get_mdix_mode,
.set_vlan_filter = hclge_set_port_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+ .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+ .set_channels = hclge_set_channels,
+ .get_channels = hclge_get_channels,
+ .get_flowctrl_adv = hclge_get_flowctrl_adv,
};
static struct hnae3_ae_algo ae_algo = {