Diffstat (limited to 'drivers/net/ethernet/hisilicon')
17 files changed, 1442 insertions, 531 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 3e9203ea42a6..519e2bd6aa60 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -11,6 +11,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ + HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */ @@ -57,6 +58,8 @@ enum hclge_mbx_vlan_cfg_subcode { #define HCLGE_MBX_MAX_MSG_SIZE 16 #define HCLGE_MBX_MAX_RESP_DATA_SIZE 8 +#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3 +#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3 struct hclgevf_mbx_resp_status { struct mutex mbx_mutex; /* protects against contending sync cmd resp */ @@ -83,6 +86,21 @@ struct hclge_mbx_pf_to_vf_cmd { u16 msg[8]; }; +/* used by VF to store the received Async responses from PF */ +struct hclgevf_mbx_arq_ring { +#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8 +#define HCLGE_MBX_MAX_ARQ_MSG_NUM 1024 + struct hclgevf_dev *hdev; + u32 head; + u32 tail; + u32 count; + u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; +}; + #define hclge_mbx_ring_ptr_move_crq(crq) \ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) +#define hclge_mbx_tail_ptr_move_arq(arq) \ + (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) +#define hclge_mbx_head_ptr_move_arq(arq) \ + (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index fd06bc78c58e..37ec1b3286c6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -118,6 +118,8 @@ enum hnae3_reset_notify_type { }; enum hnae3_reset_type { + HNAE3_VF_RESET, + HNAE3_VF_FULL_RESET, HNAE3_FUNC_RESET, HNAE3_CORE_RESET, HNAE3_GLOBAL_RESET, @@ -265,6 +267,8 @@ struct hnae3_ae_dev { * Get tc size of handle * get_vector() * Get vector number and vector information + * put_vector() + * Put the vector in hdev * map_ring_to_vector() * Map rings to vector * unmap_ring_from_vector() @@ -336,7 +340,8 @@ struct hnae3_ae_ops { u32 *tx_usecs_high, u32 *rx_usecs_high); void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); - int (*set_mac_addr)(struct hnae3_handle *handle, void *p); + int (*set_mac_addr)(struct hnae3_handle *handle, void *p, + bool is_first); int (*add_uc_addr)(struct hnae3_handle *handle, const unsigned char *addr); int (*rm_uc_addr)(struct hnae3_handle *handle, @@ -375,6 +380,7 @@ struct hnae3_ae_ops { int (*get_vector)(struct hnae3_handle *handle, u16 vector_num, struct hnae3_vector_info *vector_info); + int (*put_vector)(struct hnae3_handle *handle, int vector_num); int (*map_ring_to_vector)(struct hnae3_handle *handle, int vector_num, struct hnae3_ring_chain_node *vr_chain); @@ -396,8 +402,7 @@ struct hnae3_ae_ops { int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto); int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); - void (*reset_event)(struct hnae3_handle *handle, - enum hnae3_reset_type reset); + void (*reset_event)(struct hnae3_handle *handle); void (*get_channels)(struct hnae3_handle *handle, struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, @@ -407,6 +412,10 @@ struct hnae3_ae_ops { u32 *flowctrl_adv); int (*set_led_id)(struct hnae3_handle 
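A note on the new VF-side ARQ above: it is a plain single-producer/single-consumer ring, where tail is the next slot the mailbox ISR fills, head is the next slot the VF task consumes, and count tracks occupancy. One detail worth flagging: the wrap macros take the index modulo HCLGE_MBX_MAX_ARQ_MSG_SIZE (8, the number of u16 words per message), while the ring itself has HCLGE_MBX_MAX_ARQ_MSG_NUM (1024) slots. The standalone sketch below (illustrative names, not the driver's) wraps over the slot count, which appears to be the intent.

#include <stdint.h>
#include <string.h>

#define ARQ_MSG_SIZE 8     /* u16 words per message (HCLGE_MBX_MAX_ARQ_MSG_SIZE) */
#define ARQ_MSG_NUM  1024  /* ring slots (HCLGE_MBX_MAX_ARQ_MSG_NUM) */

struct arq {
	uint32_t head;   /* next slot to consume */
	uint32_t tail;   /* next slot to fill */
	uint32_t count;  /* currently filled slots */
	uint16_t msg_q[ARQ_MSG_NUM][ARQ_MSG_SIZE];
};

/* producer side: 0 on success, -1 when the ring is full */
static int arq_push(struct arq *q, const uint16_t msg[ARQ_MSG_SIZE])
{
	if (q->count == ARQ_MSG_NUM)
		return -1;
	memcpy(q->msg_q[q->tail], msg, sizeof(q->msg_q[0]));
	q->tail = (q->tail + 1) % ARQ_MSG_NUM;  /* wrap over slot count */
	q->count++;
	return 0;
}

/* consumer side: 0 on success, -1 when the ring is empty */
static int arq_pop(struct arq *q, uint16_t msg[ARQ_MSG_SIZE])
{
	if (!q->count)
		return -1;
	memcpy(msg, q->msg_q[q->head], sizeof(q->msg_q[0]));
	q->head = (q->head + 1) % ARQ_MSG_NUM;
	q->count--;
	return 0;
}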
*handle, enum ethtool_phys_id_state status); + void (*get_link_mode)(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising); + void (*get_port_type)(struct hnae3_handle *handle, u8 *port_type); }; struct hnae3_dcb_ops { @@ -487,6 +496,9 @@ struct hnae3_handle { struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ u64 flags; /* Indicate the capabilities for this handle*/ + unsigned long last_reset_time; + enum hnae3_reset_type reset_level; + union { struct net_device *netdev; /* first member */ struct hnae3_knic_private_info kinfo; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 601b6295d3f8..40a3eb70629e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -168,8 +168,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing */ - if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable && - !tqp_vector->rx_group.gl_adapt_enable) + if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable && + !tqp_vector->rx_group.coal.gl_adapt_enable) /* According to the hardware, the range of rl_reg is * 0-59 and the unit is 4. */ @@ -205,23 +205,30 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, */ /* Default: enable interrupt coalescing self-adaptive and GL */ - tqp_vector->tx_group.gl_adapt_enable = 1; - tqp_vector->rx_group.gl_adapt_enable = 1; + tqp_vector->tx_group.coal.gl_adapt_enable = 1; + tqp_vector->rx_group.coal.gl_adapt_enable = 1; - tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; - tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; - - hns3_set_vector_coalesce_tx_gl(tqp_vector, - tqp_vector->tx_group.int_gl); - hns3_set_vector_coalesce_rx_gl(tqp_vector, - tqp_vector->rx_group.int_gl); + tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; + tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; /* Default: disable RL */ h->kinfo.int_rl_setting = 0; - hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); - tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; - tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; + tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; + tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; + tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; +} + +static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, + struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + + hns3_set_vector_coalesce_tx_gl(tqp_vector, + tqp_vector->tx_group.coal.int_gl); + hns3_set_vector_coalesce_rx_gl(tqp_vector, + tqp_vector->rx_group.coal.int_gl); + hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); } static int hns3_nic_set_real_num_queue(struct net_device *netdev) @@ -249,6 +256,16 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) return 0; } +static u16 hns3_get_max_available_channels(struct hnae3_handle *h) +{ + u16 free_tqps, max_rss_size, max_tqps; + + h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); + max_tqps = h->kinfo.num_tc * max_rss_size; + + return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); +} + static int hns3_nic_net_up(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -303,7 +320,7 @@ static int hns3_nic_net_open(struct net_device *netdev) return ret; } - priv->last_reset_time = jiffies; + priv->ae_handle->last_reset_time = 
jiffies; return 0; } @@ -1104,7 +1121,7 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) return -EADDRNOTAVAIL; - ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data); + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); if (ret) { netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); return ret; @@ -1388,11 +1405,15 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hns3_nic_priv *priv = netdev_priv(netdev); int ret = -EIO; if (h->ae_algo->ops->set_vlan_filter) ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); + if (!ret) + set_bit(vid, priv->active_vlans); + return ret; } @@ -1400,14 +1421,32 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hns3_nic_priv *priv = netdev_priv(netdev); int ret = -EIO; if (h->ae_algo->ops->set_vlan_filter) ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); + if (!ret) + clear_bit(vid, priv->active_vlans); + return ret; } +static void hns3_restore_vlan(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + u16 vid; + int ret; + + for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { + ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + if (ret) + netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n", + vid, ret); + } +} + static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { @@ -1504,7 +1543,6 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) static void hns3_nic_net_timeout(struct net_device *ndev) { struct hns3_nic_priv *priv = netdev_priv(ndev); - unsigned long last_reset_time = priv->last_reset_time; struct hnae3_handle *h = priv->ae_handle; if (!hns3_get_tx_timeo_queue_info(ndev)) @@ -1512,24 +1550,12 @@ static void hns3_nic_net_timeout(struct net_device *ndev) priv->tx_timeout_count++; - /* This timeout is far away enough from last timeout, - * if timeout again,set the reset type to PF reset - */ - if (time_after(jiffies, (last_reset_time + 20 * HZ))) - priv->reset_level = HNAE3_FUNC_RESET; - - /* Don't do any new action before the next timeout */ - else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo))) + if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo))) return; - priv->last_reset_time = jiffies; - + /* request the reset */ if (h->ae_algo->ops->reset_event) - h->ae_algo->ops->reset_event(h, priv->reset_level); - - priv->reset_level++; - if (priv->reset_level > HNAE3_GLOBAL_RESET) - priv->reset_level = HNAE3_GLOBAL_RESET; + h->ae_algo->ops->reset_event(h); } static const struct net_device_ops hns3_nic_netdev_ops = { @@ -2064,15 +2090,13 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, desc = &ring->desc[ring->next_to_clean]; size = le16_to_cpu(desc->rx.size); - if (twobufs) { - truesize = hnae_buf_size(ring); - } else { - truesize = ALIGN(size, L1_CACHE_BYTES); + truesize = hnae_buf_size(ring); + + if (!twobufs) last_offset = hnae_page_size(ring) - hnae_buf_size(ring); - } skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, - size - pull_len, truesize - pull_len); + size - pull_len, truesize); /* Avoid re-using remote pages,flag default unreuse */ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) @@ -2369,20 +2393,20 @@ out: 
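hns3_vlan_rx_add_vid() and hns3_vlan_rx_kill_vid() above now mirror every accepted filter into priv->active_vlans, so that hns3_restore_vlan() can replay them once a PF reset has wiped the hardware table. The same bookkeeping as a self-contained sketch, with plain bit operations standing in for set_bit()/clear_bit()/for_each_set_bit() and a function pointer standing in for the hardware hook:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_N_VID 4096

static uint64_t active_vlans[VLAN_N_VID / 64];

static void vlan_set(uint16_t vid)   { active_vlans[vid / 64] |=  1ULL << (vid % 64); }
static void vlan_clear(uint16_t vid) { active_vlans[vid / 64] &= ~(1ULL << (vid % 64)); }
static bool vlan_test(uint16_t vid)  { return active_vlans[vid / 64] >> (vid % 64) & 1; }

/* replay every remembered filter into the hardware, the way
 * hns3_restore_vlan() loops over the bitmap after a reset */
static void vlan_restore(int (*hw_add)(uint16_t vid))
{
	for (uint16_t vid = 0; vid < VLAN_N_VID; vid++)
		if (vlan_test(vid) && hw_add(vid))
			fprintf(stderr, "restore vlan %u failed\n", vid);
}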
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) { -#define HNS3_RX_ULTRA_PACKET_RATE 40000 + struct hns3_enet_tqp_vector *tqp_vector = + ring_group->ring->tqp_vector; enum hns3_flow_level_range new_flow_level; - struct hns3_enet_tqp_vector *tqp_vector; - int packets_per_secs; - int bytes_per_usecs; + int packets_per_msecs; + int bytes_per_msecs; + u32 time_passed_ms; u16 new_int_gl; - int usecs; - if (!ring_group->int_gl) + if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) return false; if (ring_group->total_packets == 0) { - ring_group->int_gl = HNS3_INT_GL_50K; - ring_group->flow_level = HNS3_FLOW_LOW; + ring_group->coal.int_gl = HNS3_INT_GL_50K; + ring_group->coal.flow_level = HNS3_FLOW_LOW; return true; } @@ -2392,35 +2416,46 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) * 20-1249MB/s high (18000 ints/s) * > 40000pps ultra (8000 ints/s) */ - new_flow_level = ring_group->flow_level; - new_int_gl = ring_group->int_gl; - tqp_vector = ring_group->ring->tqp_vector; - usecs = (ring_group->int_gl << 1); - bytes_per_usecs = ring_group->total_bytes / usecs; - /* 1000000 microseconds */ - packets_per_secs = ring_group->total_packets * 1000000 / usecs; + new_flow_level = ring_group->coal.flow_level; + new_int_gl = ring_group->coal.int_gl; + time_passed_ms = + jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); + + if (!time_passed_ms) + return false; + + do_div(ring_group->total_packets, time_passed_ms); + packets_per_msecs = ring_group->total_packets; + + do_div(ring_group->total_bytes, time_passed_ms); + bytes_per_msecs = ring_group->total_bytes; + +#define HNS3_RX_LOW_BYTE_RATE 10000 +#define HNS3_RX_MID_BYTE_RATE 20000 switch (new_flow_level) { case HNS3_FLOW_LOW: - if (bytes_per_usecs > 10) + if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) new_flow_level = HNS3_FLOW_MID; break; case HNS3_FLOW_MID: - if (bytes_per_usecs > 20) + if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) new_flow_level = HNS3_FLOW_HIGH; - else if (bytes_per_usecs <= 10) + else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) new_flow_level = HNS3_FLOW_LOW; break; case HNS3_FLOW_HIGH: case HNS3_FLOW_ULTRA: default: - if (bytes_per_usecs <= 20) + if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) new_flow_level = HNS3_FLOW_MID; break; } - if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) && - (&tqp_vector->rx_group == ring_group)) +#define HNS3_RX_ULTRA_PACKET_RATE 40 + + if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && + &tqp_vector->rx_group == ring_group) new_flow_level = HNS3_FLOW_ULTRA; switch (new_flow_level) { @@ -2442,9 +2477,9 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) ring_group->total_bytes = 0; ring_group->total_packets = 0; - ring_group->flow_level = new_flow_level; - if (new_int_gl != ring_group->int_gl) { - ring_group->int_gl = new_int_gl; + ring_group->coal.flow_level = new_flow_level; + if (new_int_gl != ring_group->coal.int_gl) { + ring_group->coal.int_gl = new_int_gl; return true; } return false; @@ -2456,19 +2491,27 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; bool rx_update, tx_update; - if (rx_group->gl_adapt_enable) { + if (tqp_vector->int_adapt_down > 0) { + tqp_vector->int_adapt_down--; + return; + } + + if (rx_group->coal.gl_adapt_enable) { rx_update = hns3_get_new_int_gl(rx_group); if (rx_update) hns3_set_vector_coalesce_rx_gl(tqp_vector, - rx_group->int_gl); + rx_group->coal.int_gl); } - if 
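The rewritten hns3_get_new_int_gl() above measures traffic per millisecond of elapsed wall time (jiffies delta) instead of deriving a pseudo-interval from the GL value. A compressed, compilable version of its three-state hysteresis, using the thresholds from the hunk; the caller is assumed to have bailed out already on a zero time delta, exactly as the driver does:

#include <stdbool.h>
#include <stdint.h>

enum flow_level { FLOW_LOW, FLOW_MID, FLOW_HIGH, FLOW_ULTRA };

#define LOW_BYTE_RATE   10000  /* bytes/ms, HNS3_RX_LOW_BYTE_RATE */
#define MID_BYTE_RATE   20000  /* bytes/ms, HNS3_RX_MID_BYTE_RATE */
#define ULTRA_PKT_RATE     40  /* packets/ms, HNS3_RX_ULTRA_PACKET_RATE */

static enum flow_level next_level(enum flow_level cur, uint64_t bytes,
				  uint64_t packets, uint32_t time_ms, bool is_rx)
{
	uint64_t bytes_per_ms = bytes / time_ms;  /* time_ms != 0 */
	uint64_t pkts_per_ms = packets / time_ms;

	switch (cur) {
	case FLOW_LOW:
		if (bytes_per_ms > LOW_BYTE_RATE)
			cur = FLOW_MID;
		break;
	case FLOW_MID:
		if (bytes_per_ms > MID_BYTE_RATE)
			cur = FLOW_HIGH;
		else if (bytes_per_ms <= LOW_BYTE_RATE)
			cur = FLOW_LOW;
		break;
	default:  /* HIGH and ULTRA step back down together */
		if (bytes_per_ms <= MID_BYTE_RATE)
			cur = FLOW_MID;
		break;
	}

	/* only the rx group is ever promoted to ULTRA */
	if (is_rx && pkts_per_ms > ULTRA_PKT_RATE)
		cur = FLOW_ULTRA;

	return cur;
}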
(tx_group->gl_adapt_enable) { + if (tx_group->coal.gl_adapt_enable) { tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); if (tx_update) hns3_set_vector_coalesce_tx_gl(tqp_vector, - tx_group->int_gl); + tx_group->coal.int_gl); } + + tqp_vector->last_jiffies = jiffies; + tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; } static int hns3_nic_common_poll(struct napi_struct *napi, int budget) @@ -2615,32 +2658,18 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; - struct hnae3_vector_info *vector; - struct pci_dev *pdev = h->pdev; - u16 tqp_num = h->kinfo.num_tqps; - u16 vector_num; int ret = 0; u16 i; - /* RSS size, cpu online and vector_num should be the same */ - /* Should consider 2p/4p later */ - vector_num = min_t(u16, num_online_cpus(), tqp_num); - vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), - GFP_KERNEL); - if (!vector) - return -ENOMEM; - - vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); - - priv->vector_num = vector_num; - priv->tqp_vector = (struct hns3_enet_tqp_vector *) - devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), - GFP_KERNEL); - if (!priv->tqp_vector) - return -ENOMEM; + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + hns3_vector_gl_rl_init_hw(tqp_vector, priv); + tqp_vector->num_tqps = 0; + } - for (i = 0; i < tqp_num; i++) { - u16 vector_i = i % vector_num; + for (i = 0; i < h->kinfo.num_tqps; i++) { + u16 vector_i = i % priv->vector_num; + u16 tqp_num = h->kinfo.num_tqps; tqp_vector = &priv->tqp_vector[vector_i]; @@ -2650,52 +2679,94 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) hns3_add_ring_to_group(&tqp_vector->rx_group, priv->ring_data[i + tqp_num].ring); - tqp_vector->idx = vector_i; - tqp_vector->mask_addr = vector[vector_i].io_addr; - tqp_vector->vector_irq = vector[vector_i].vector; - tqp_vector->num_tqps++; - priv->ring_data[i].ring->tqp_vector = tqp_vector; priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; + tqp_vector->num_tqps++; } - for (i = 0; i < vector_num; i++) { + for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; tqp_vector->rx_group.total_bytes = 0; tqp_vector->rx_group.total_packets = 0; tqp_vector->tx_group.total_bytes = 0; tqp_vector->tx_group.total_packets = 0; - hns3_vector_gl_rl_init(tqp_vector, priv); tqp_vector->handle = h; ret = hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain); if (ret) - goto out; + return ret; ret = h->ae_algo->ops->map_ring_to_vector(h, tqp_vector->vector_irq, &vector_ring_chain); - if (ret) - goto out; hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + if (ret) + return ret; + netif_napi_add(priv->netdev, &tqp_vector->napi, hns3_nic_common_poll, NAPI_POLL_WEIGHT); } + return 0; +} + +static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_vector_info *vector; + struct pci_dev *pdev = h->pdev; + u16 tqp_num = h->kinfo.num_tqps; + u16 vector_num; + int ret = 0; + u16 i; + + /* RSS size, cpu online and vector_num should be the same */ + /* Should consider 2p/4p later */ + vector_num = min_t(u16, num_online_cpus(), tqp_num); + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), + GFP_KERNEL); + if (!vector) + return -ENOMEM; + + vector_num = h->ae_algo->ops->get_vector(h, vector_num, 
vector); + + priv->vector_num = vector_num; + priv->tqp_vector = (struct hns3_enet_tqp_vector *) + devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), + GFP_KERNEL); + if (!priv->tqp_vector) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + tqp_vector->idx = i; + tqp_vector->mask_addr = vector[i].io_addr; + tqp_vector->vector_irq = vector[i].vector; + hns3_vector_gl_rl_init(tqp_vector, priv); + } + out: devm_kfree(&pdev->dev, vector); return ret; } +static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) +{ + group->ring = NULL; + group->count = 0; +} + static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) { struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; - struct pci_dev *pdev = h->pdev; int i, ret; for (i = 0; i < priv->vector_num; i++) { @@ -2711,6 +2782,10 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) if (ret) return ret; + ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); + if (ret) + return ret; + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { @@ -2722,12 +2797,30 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) } priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; - + hns3_clear_ring_group(&tqp_vector->rx_group); + hns3_clear_ring_group(&tqp_vector->tx_group); netif_napi_del(&priv->tqp_vector[i].napi); } - devm_kfree(&pdev->dev, priv->tqp_vector); + return 0; +} +static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct pci_dev *pdev = h->pdev; + int i, ret; + + for (i = 0; i < priv->vector_num; i++) { + struct hns3_enet_tqp_vector *tqp_vector; + + tqp_vector = &priv->tqp_vector[i]; + ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); + if (ret) + return ret; + } + + devm_kfree(&pdev->dev, priv->tqp_vector); return 0; } @@ -2957,13 +3050,8 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) h->ae_algo->ops->reset_queue(h, i); hns3_fini_ring(priv->ring_data[i].ring); - devm_kfree(priv->dev, priv->ring_data[i].ring); hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); - devm_kfree(priv->dev, - priv->ring_data[i + h->kinfo.num_tqps].ring); } - devm_kfree(priv->dev, priv->ring_data); - return 0; } @@ -2987,7 +3075,7 @@ static void hns3_init_mac_addr(struct net_device *netdev) } if (h->ae_algo->ops->set_mac_addr) - h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); + h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); } @@ -3013,7 +3101,7 @@ static int hns3_client_init(struct hnae3_handle *handle) int ret; netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), - handle->kinfo.num_tqps); + hns3_get_max_available_channels(handle)); if (!netdev) return -ENOMEM; @@ -3021,8 +3109,8 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->dev = &pdev->dev; priv->netdev = netdev; priv->ae_handle = handle; - priv->last_reset_time = jiffies; - priv->reset_level = HNAE3_FUNC_RESET; + priv->ae_handle->reset_level = HNAE3_NONE_RESET; + priv->ae_handle->last_reset_time = jiffies; priv->tx_timeout_count = 0; handle->kinfo.netdev = netdev; @@ -3048,6 +3136,12 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_get_ring_cfg; } + ret = hns3_nic_alloc_vector_data(priv); + if (ret) { + ret = -ENOMEM; + goto out_alloc_vector_data; + } + ret = 
hns3_nic_init_vector_data(priv); if (ret) { ret = -ENOMEM; @@ -3076,8 +3170,10 @@ static int hns3_client_init(struct hnae3_handle *handle) out_reg_netdev_fail: out_init_ring_data: (void)hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; out_init_vector_data: + hns3_nic_dealloc_vector_data(priv); +out_alloc_vector_data: + priv->ring_data = NULL; out_get_ring_cfg: priv->ae_handle = NULL; free_netdev(netdev); @@ -3097,10 +3193,16 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) if (ret) netdev_err(netdev, "uninit vector error\n"); + ret = hns3_nic_dealloc_vector_data(priv); + if (ret) + netdev_err(netdev, "dealloc vector error\n"); + ret = hns3_uninit_all_ring(priv); if (ret) netdev_err(netdev, "uninit ring error\n"); + hns3_put_ring_config(priv); + priv->ring_data = NULL; free_netdev(netdev); @@ -3240,7 +3342,6 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); int ret = 0; if (netif_running(kinfo->netdev)) { @@ -3250,8 +3351,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) "hns net up fail, ret=%d!\n", ret); return ret; } - - priv->last_reset_time = jiffies; + handle->last_reset_time = jiffies; } return ret; @@ -3263,11 +3363,14 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - priv->reset_level = 1; hns3_init_mac_addr(netdev); hns3_nic_set_rx_mode(netdev); hns3_recover_hw_addr(netdev); + /* Hardware table is only clear when pf resets */ + if (!(handle->flags & HNAE3_SUPPORT_VF)) + hns3_restore_vlan(netdev); + /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -3306,6 +3409,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) if (ret) netdev_err(netdev, "uninit ring error\n"); + hns3_put_ring_config(priv); + priv->ring_data = NULL; return ret; @@ -3336,18 +3441,24 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } -static u16 hns3_get_max_available_channels(struct net_device *netdev) +static void hns3_restore_coal(struct hns3_nic_priv *priv, + struct hns3_enet_coalesce *tx, + struct hns3_enet_coalesce *rx) { - struct hnae3_handle *h = hns3_get_handle(netdev); - u16 free_tqps, max_rss_size, max_tqps; - - h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); - max_tqps = h->kinfo.num_tc * max_rss_size; + u16 vector_num = priv->vector_num; + int i; - return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); + for (i = 0; i < vector_num; i++) { + memcpy(&priv->tqp_vector[i].tx_group.coal, tx, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->tqp_vector[i].rx_group.coal, rx, + sizeof(struct hns3_enet_coalesce)); + } } -static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) +static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, + struct hns3_enet_coalesce *tx, + struct hns3_enet_coalesce *rx) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); @@ -3361,6 +3472,12 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) if (ret) return ret; + ret = hns3_nic_alloc_vector_data(priv); + if (ret) + goto err_alloc_vector; + + hns3_restore_coal(priv, tx, rx); + ret = hns3_nic_init_vector_data(priv); if (ret) goto err_uninit_vector; @@ 
-3375,6 +3492,8 @@ err_put_ring: hns3_put_ring_config(priv); err_uninit_vector: hns3_nic_uninit_vector_data(priv); +err_alloc_vector: + hns3_nic_dealloc_vector_data(priv); return ret; } @@ -3389,6 +3508,7 @@ int hns3_set_channels(struct net_device *netdev, struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; + struct hns3_enet_coalesce tx_coal, rx_coal; bool if_running = netif_running(netdev); u32 new_tqp_num = ch->combined_count; u16 org_tqp_num; @@ -3397,12 +3517,12 @@ int hns3_set_channels(struct net_device *netdev, if (ch->rx_count || ch->tx_count) return -EINVAL; - if (new_tqp_num > hns3_get_max_available_channels(netdev) || + if (new_tqp_num > hns3_get_max_available_channels(h) || new_tqp_num < kinfo->num_tc) { dev_err(&netdev->dev, "Change tqps fail, the tqp range is from %d to %d", kinfo->num_tc, - hns3_get_max_available_channels(netdev)); + hns3_get_max_available_channels(h)); return -EINVAL; } @@ -3411,7 +3531,7 @@ int hns3_set_channels(struct net_device *netdev, return 0; if (if_running) - dev_close(netdev); + hns3_nic_net_stop(netdev); hns3_clear_all_ring(h); @@ -3422,12 +3542,26 @@ int hns3_set_channels(struct net_device *netdev, goto open_netdev; } + /* Changing the tqp num may also change the vector num, + * ethtool only support setting and querying one coal + * configuation for now, so save the vector 0' coal + * configuation here in order to restore it. + */ + memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal, + sizeof(struct hns3_enet_coalesce)); + + hns3_nic_dealloc_vector_data(priv); + hns3_uninit_all_ring(priv); + hns3_put_ring_config(priv); org_tqp_num = h->kinfo.num_tqps; - ret = hns3_modify_tqp_num(netdev, new_tqp_num); + ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal); if (ret) { - ret = hns3_modify_tqp_num(netdev, org_tqp_num); + ret = hns3_modify_tqp_num(netdev, org_tqp_num, + &tx_coal, &rx_coal); if (ret) { /* If revert to old tqp failed, fatal error occurred */ dev_err(&netdev->dev, @@ -3440,7 +3574,7 @@ int hns3_set_channels(struct net_device *netdev, open_netdev: if (if_running) - dev_open(netdev); + hns3_nic_net_open(netdev); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 213f501b30bb..9e4cfbbf8dcd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -10,6 +10,8 @@ #ifndef __HNS3_ENET_H #define __HNS3_ENET_H +#include <linux/if_vlan.h> + #include "hnae3.h" extern const char hns3_driver_version[]; @@ -460,15 +462,21 @@ enum hns3_link_mode_bits { #define HNS3_INT_RL_MAX 0x00EC #define HNS3_INT_RL_ENABLE_MASK 0x40 +#define HNS3_INT_ADAPT_DOWN_START 100 + +struct hns3_enet_coalesce { + u16 int_gl; + u8 gl_adapt_enable; + enum hns3_flow_level_range flow_level; +}; + struct hns3_enet_ring_group { /* array of pointers to rings */ struct hns3_enet_ring *ring; u64 total_bytes; /* total bytes processed this group */ u64 total_packets; /* total packets processed this group */ u16 count; - enum hns3_flow_level_range flow_level; - u16 int_gl; - u8 gl_adapt_enable; + struct hns3_enet_coalesce coal; }; struct hns3_enet_tqp_vector { @@ -491,6 +499,7 @@ struct hns3_enet_tqp_vector { /* when 0 should adjust interrupt coalesce parameter */ u8 int_adapt_down; + unsigned long last_jiffies; } 
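Because ethtool only exposes a single coalesce configuration, hns3_set_channels() above snapshots vector 0's tx/rx settings before tearing the vectors down, and hns3_restore_coal() fans the snapshot out to every rebuilt vector. The shape of that save/spread step as a sketch (the allocation details are illustrative, not the driver's devm_* calls):

#include <stdint.h>
#include <stdlib.h>

struct coalesce { uint16_t int_gl; uint8_t gl_adapt_enable; int flow_level; };
struct vector   { struct coalesce tx_coal, rx_coal; };

/* vector 0 stands for all vectors, since only one configuration is
 * user-visible; every new vector inherits its settings */
static struct vector *rebuild_vectors(struct vector *old, int new_num)
{
	struct coalesce tx = old[0].tx_coal;
	struct coalesce rx = old[0].rx_coal;
	struct vector *fresh = calloc(new_num, sizeof(*fresh));

	if (!fresh)
		return NULL;
	for (int i = 0; i < new_num; i++) {
		fresh[i].tx_coal = tx;
		fresh[i].rx_coal = rx;
	}
	free(old);
	return fresh;
}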
____cacheline_internodealigned_in_smp; enum hns3_udp_tnl_type { @@ -523,8 +532,6 @@ struct hns3_nic_priv { /* The most recently read link state */ int link; u64 tx_timeout_count; - enum hnae3_reset_type reset_level; - unsigned long last_reset_time; unsigned long state; @@ -535,6 +542,7 @@ struct hns3_nic_priv { struct notifier_block notifier_block; /* Vxlan/Geneve information */ struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; }; union l3_hdr_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index b034c7f24eda..9d07116a4426 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -74,19 +74,6 @@ struct hns3_link_mode_mapping { u32 ethtool_link_mode; }; -static const struct hns3_link_mode_mapping hns3_lm_map[] = { - {HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, - {HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, - {HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, - {HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, - {HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, - {HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT}, - {HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT}, - {HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT}, - {HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT}, - {HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, -}; - static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) { struct hnae3_handle *h = hns3_get_handle(ndev); @@ -309,6 +296,9 @@ static void hns3_self_test(struct net_device *ndev, struct hnae3_handle *h = priv->ae_handle; int st_param[HNS3_SELF_TEST_TPYE_NUM][2]; bool if_running = netif_running(ndev); +#if IS_ENABLED(CONFIG_VLAN_8021Q) + bool dis_vlan_filter; +#endif int test_index = 0; u32 i; @@ -323,6 +313,14 @@ static void hns3_self_test(struct net_device *ndev, if (if_running) dev_close(ndev); +#if IS_ENABLED(CONFIG_VLAN_8021Q) + /* Disable the vlan filter for selftest does not support it */ + dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) && + h->ae_algo->ops->enable_vlan_filter; + if (dis_vlan_filter) + h->ae_algo->ops->enable_vlan_filter(h, false); +#endif + set_bit(HNS3_NIC_STATE_TESTING, &priv->state); for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) { @@ -345,28 +343,15 @@ static void hns3_self_test(struct net_device *ndev, clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (dis_vlan_filter) + h->ae_algo->ops->enable_vlan_filter(h, true); +#endif + if (if_running) dev_open(ndev); } -static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, - bool is_advertised) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) { - if (!(caps & hns3_lm_map[i].hns3_link_mode)) - continue; - - if (is_advertised) - __set_bit(hns3_lm_map[i].ethtool_link_mode, - cmd->link_modes.advertising); - else - __set_bit(hns3_lm_map[i].ethtool_link_mode, - cmd->link_modes.supported); - } -} - static int hns3_get_sset_count(struct net_device *netdev, int stringset) { struct hnae3_handle *h = hns3_get_handle(netdev); @@ -578,18 +563,19 @@ static int hns3_get_link_ksettings(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); u32 flowctrl_adv = 0; - u32 supported_caps; - u32 advertised_caps; - u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; u8 link_stat; if (!h->ae_algo || !h->ae_algo->ops) 
return -EOPNOTSUPP; /* 1.auto_neg & speed & duplex from cmd */ - if (netdev->phydev) + if (netdev->phydev) { phy_ethtool_ksettings_get(netdev->phydev, cmd); - else if (h->ae_algo->ops->get_ksettings_an_result) + + return 0; + } + + if (h->ae_algo->ops->get_ksettings_an_result) h->ae_algo->ops->get_ksettings_an_result(h, &cmd->base.autoneg, &cmd->base.speed, @@ -603,62 +589,16 @@ static int hns3_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - /* 2.media_type get from bios parameter block */ - if (h->ae_algo->ops->get_media_type) { - h->ae_algo->ops->get_media_type(h, &media_type); - - switch (media_type) { - case HNAE3_MEDIA_TYPE_FIBER: - cmd->base.port = PORT_FIBRE; - supported_caps = HNS3_LM_FIBRE_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_1000BASET_FULL_BIT; - - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_COPPER: - cmd->base.port = PORT_TP; - supported_caps = HNS3_LM_TP_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | - HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | - HNS3_LM_10BASET_HALF_BIT; - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_BACKPLANE: - cmd->base.port = PORT_NONE; - supported_caps = HNS3_LM_BACKPLANE_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | - HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | - HNS3_LM_10BASET_HALF_BIT; - - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_UNKNOWN: - default: - cmd->base.port = PORT_OTHER; - supported_caps = 0; - advertised_caps = 0; - break; - } - - if (!cmd->base.autoneg) - advertised_caps &= ~HNS3_LM_AUTONEG_BIT; + /* 2.get link mode and port type*/ + if (h->ae_algo->ops->get_link_mode) + h->ae_algo->ops->get_link_mode(h, + cmd->link_modes.supported, + cmd->link_modes.advertising); - advertised_caps &= ~HNS3_LM_PAUSE_BIT; - - /* now, map driver link modes to ethtool link modes */ - hns3_driv_to_eth_caps(supported_caps, cmd, false); - hns3_driv_to_eth_caps(advertised_caps, cmd, true); - } + cmd->base.port = PORT_NONE; + if (h->ae_algo->ops->get_port_type) + h->ae_algo->ops->get_port_type(h, + &cmd->base.port); /* 3.mdix_ctrl&mdix get from phy reg */ if (h->ae_algo->ops->get_mdix_mode) @@ -905,11 +845,13 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, tx_vector = priv->ring_data[queue].ring->tqp_vector; rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; - cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable; - cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable; + cmd->use_adaptive_tx_coalesce = + tx_vector->tx_group.coal.gl_adapt_enable; + cmd->use_adaptive_rx_coalesce = + rx_vector->rx_group.coal.gl_adapt_enable; - cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl; - cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl; + cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl; + cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl; cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting; cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting; @@ -1029,14 +971,18 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, tx_vector = priv->ring_data[queue].ring->tqp_vector; rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; - tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce; - rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce; + 
tx_vector->tx_group.coal.gl_adapt_enable = + cmd->use_adaptive_tx_coalesce; + rx_vector->rx_group.coal.gl_adapt_enable = + cmd->use_adaptive_rx_coalesce; - tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs; - rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs; + tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs; + rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs; - hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl); - hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl); + hns3_set_vector_coalesce_tx_gl(tx_vector, + tx_vector->tx_group.coal.int_gl); + hns3_set_vector_coalesce_rx_gl(rx_vector, + rx_vector->rx_group.coal.int_gl); hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting); hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting); @@ -1111,6 +1057,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_channels = hns3_get_channels, .get_coalesce = hns3_get_coalesce, .set_coalesce = hns3_set_coalesce, + .get_link = hns3_get_link, }; static const struct ethtool_ops hns3_ethtool_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 3fd10a6bec53..ee3cbac6dfaa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -12,7 +12,7 @@ #include <linux/types.h> #include <linux/io.h> -#define HCLGE_CMDQ_TX_TIMEOUT 1000 +#define HCLGE_CMDQ_TX_TIMEOUT 30000 struct hclge_dev; struct hclge_desc { @@ -414,6 +414,8 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16) #define HCLGE_CFG_RSS_SIZE_S 24 #define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) +#define HCLGE_CFG_SPEED_ABILITY_S 0 +#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0) struct hclge_cfg_param_cmd { __le32 offset; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 5018d6633133..955f0e3d5c95 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -144,6 +144,8 @@ static int hclge_map_update(struct hnae3_handle *h) if (ret) return ret; + hclge_rss_indir_init_cfg(hdev); + return hclge_rss_init_hw(hdev); } @@ -203,9 +205,11 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) { + u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC]; struct hclge_vport *vport = hclge_get_vport(h); struct hclge_dev *hdev = vport->back; u8 i, j, pfc_map, *prio_tc; + int ret; memset(pfc, 0, sizeof(*pfc)); pfc->pfc_cap = hdev->pfc_max; @@ -220,6 +224,18 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) } } + ret = hclge_pfc_tx_stats_get(hdev, requests); + if (ret) + return ret; + + ret = hclge_pfc_rx_stats_get(hdev, indications); + if (ret) + return ret; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + pfc->requests[i] = requests[i]; + pfc->indications[i] = indications[i]; + } return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 32bc6f68e297..bede4117bad9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -55,6 +55,8 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { {0, } }; +MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); + static const char 
hns3_nic_test_strs[][ETH_GSTRING_LEN] = { "Mac Loopback test", "Serdes Loopback test", @@ -1024,6 +1026,45 @@ static int hclge_parse_speed(int speed_cmd, int *speed) return 0; } +static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, + u8 speed_ability) +{ + unsigned long *supported = hdev->hw.mac.supported; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + supported); + + set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); + set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); +} + +static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) +{ + u8 media_type = hdev->hw.mac.media_type; + + if (media_type != HNAE3_MEDIA_TYPE_FIBER) + return; + + hclge_parse_fiber_link_mode(hdev, speed_ability); +} + static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) { struct hclge_cfg_param_cmd *req; @@ -1072,6 +1113,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[1].data; cfg->numa_node_map = __le32_to_cpu(req->param[0]); + + cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_M, + HCLGE_CFG_SPEED_ABILITY_S); } /* hclge_get_cfg: query the static parameter from flash @@ -1160,6 +1205,8 @@ static int hclge_configure(struct hclge_dev *hdev) return ret; } + hclge_parse_link_mode(hdev, cfg.speed_ability); + if ((hdev->tc_max > HNAE3_MAX_TC) || (hdev->tc_max < 1)) { dev_warn(&hdev->pdev->dev, "TC num = %d.\n", @@ -2702,7 +2749,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev) return 0; } -static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) +int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) { struct hclge_desc desc; struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; @@ -2798,27 +2845,31 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_notify_client(hdev, HNAE3_UP_CLIENT); } -static void hclge_reset_event(struct hnae3_handle *handle, - enum hnae3_reset_type reset) +static void hclge_reset_event(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - dev_info(&hdev->pdev->dev, - "Receive reset event , reset_type is %d", reset); + /* check if this is a new reset request and we are not here just because + * last reset attempt did not succeed and watchdog hit us again. We will + * know this if last reset request did not occur very recently (watchdog + * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) + * In case of new request we reset the "reset level" to PF reset. 
+ */ + if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) + handle->reset_level = HNAE3_FUNC_RESET; - switch (reset) { - case HNAE3_FUNC_RESET: - case HNAE3_CORE_RESET: - case HNAE3_GLOBAL_RESET: - /* request reset & schedule reset task */ - set_bit(reset, &hdev->reset_request); - hclge_reset_task_schedule(hdev); - break; - default: - dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset); - break; - } + dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", + handle->reset_level); + + /* request reset & schedule reset task */ + set_bit(handle->reset_level, &hdev->reset_request); + hclge_reset_task_schedule(hdev); + + if (handle->reset_level < HNAE3_GLOBAL_RESET) + handle->reset_level++; + + handle->last_reset_time = jiffies; } static void hclge_reset_subtask(struct hclge_dev *hdev) @@ -2969,6 +3020,24 @@ static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) return -EINVAL; } +static int hclge_put_vector(struct hnae3_handle *handle, int vector) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&hdev->pdev->dev, + "Get vector index fail. vector_id =%d\n", vector_id); + return vector_id; + } + + hclge_free_vector(hdev, vector_id); + + return 0; +} + static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) { return HCLGE_RSS_KEY_SIZE; @@ -2979,31 +3048,6 @@ static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) return HCLGE_RSS_IND_TBL_SIZE; } -static int hclge_get_rss_algo(struct hclge_dev *hdev) -{ - struct hclge_rss_config_cmd *req; - struct hclge_desc desc; - int rss_hash_algo; - int ret; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get link status error, status =%d\n", ret); - return ret; - } - - req = (struct hclge_rss_config_cmd *)desc.data; - rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK); - - if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ) - return ETH_RSS_HASH_TOP; - - return -EINVAL; -} - static int hclge_set_rss_algo_key(struct hclge_dev *hdev, const u8 hfunc, const u8 *key) { @@ -3042,7 +3086,7 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev, return 0; } -static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) +static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) { struct hclge_rss_indirection_table_cmd *req; struct hclge_desc desc; @@ -3116,14 +3160,16 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); req = (struct hclge_rss_input_tuple_cmd *)desc.data; - req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; - req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; - req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + + /* Get the tuple cfg from pf */ + req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = 
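hclge_reset_event() above now owns the escalation that previously lived in hns3_nic_net_timeout(): each request schedules the handle's current reset level and then bumps it toward HNAE3_GLOBAL_RESET, unless the previous request is old enough (4 * 5*HZ) that this counts as a fresh failure, in which case it starts over at function reset. The policy in isolation, with a millisecond clock standing in for jiffies:

#include <stdint.h>

enum reset_type { RESET_NONE, RESET_FUNC, RESET_CORE, RESET_GLOBAL };

struct handle {
	enum reset_type level;
	uint64_t last_reset_ms;
};

/* returns the level to schedule for this request */
static enum reset_type request_reset(struct handle *h, uint64_t now_ms)
{
	enum reset_type asked;

	/* a request long after the last one is treated as new */
	if (now_ms > h->last_reset_ms + 4 * 5 * 1000)
		h->level = RESET_FUNC;

	asked = h->level;
	if (h->level < RESET_GLOBAL)  /* escalate for the next attempt */
		h->level++;
	h->last_reset_ms = now_ms;
	return asked;
}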
hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -3138,12 +3184,11 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, u8 *hfunc) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; int i; /* Get hash algorithm */ if (hfunc) - *hfunc = hclge_get_rss_algo(hdev); + *hfunc = vport->rss_algo; /* Get the RSS Key required by the user */ if (key) @@ -3167,8 +3212,6 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, /* Set the RSS Hash Key if specififed by the user */ if (key) { - /* Update the shadow RSS key with user specified qids */ - memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); if (hfunc == ETH_RSS_HASH_TOP || hfunc == ETH_RSS_HASH_NO_CHANGE) @@ -3178,6 +3221,10 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, ret = hclge_set_rss_algo_key(hdev, hash_algo, key); if (ret) return ret; + + /* Update the shadow RSS key with user specified qids */ + memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); + vport->rss_algo = hash_algo; } /* Update the shadow RSS table with user specified qids */ @@ -3185,8 +3232,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, vport->rss_indirection_tbl[i] = indir[i]; /* Update the hardware */ - ret = hclge_set_rss_indir_table(hdev, indir); - return ret; + return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); } static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) @@ -3229,15 +3275,16 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, return -EINVAL; req = (struct hclge_rss_input_tuple_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Read rss tuple fail, status = %d\n", ret); - return ret; - } + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); - hclge_cmd_reuse_desc(&desc, false); + req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; tuple_sets = hclge_get_rss_hash_bits(nfc); switch (nfc->flow_type) { @@ -3274,52 +3321,49 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, } ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "Set rss tuple fail, status = %d\n", ret); + return ret; + } - return ret; + vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; + vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; + vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; + vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; + vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; + vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; + vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; + vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + return 0; } static int 
hclge_get_rss_tuple(struct hnae3_handle *handle, struct ethtool_rxnfc *nfc) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - struct hclge_rss_input_tuple_cmd *req; - struct hclge_desc desc; u8 tuple_sets; - int ret; nfc->data = 0; - req = (struct hclge_rss_input_tuple_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Read rss tuple fail, status = %d\n", ret); - return ret; - } - switch (nfc->flow_type) { case TCP_V4_FLOW: - tuple_sets = req->ipv4_tcp_en; + tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; break; case UDP_V4_FLOW: - tuple_sets = req->ipv4_udp_en; + tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; break; case TCP_V6_FLOW: - tuple_sets = req->ipv6_tcp_en; + tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; break; case UDP_V6_FLOW: - tuple_sets = req->ipv6_udp_en; + tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; break; case SCTP_V4_FLOW: - tuple_sets = req->ipv4_sctp_en; + tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; break; case SCTP_V6_FLOW: - tuple_sets = req->ipv6_sctp_en; + tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; break; case IPV4_FLOW: case IPV6_FLOW: @@ -3354,50 +3398,28 @@ static int hclge_get_tc_size(struct hnae3_handle *handle) int hclge_rss_init_hw(struct hclge_dev *hdev) { - const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; struct hclge_vport *vport = hdev->vport; + u8 *rss_indir = vport[0].rss_indirection_tbl; + u16 rss_size = vport[0].alloc_rss_size; + u8 *key = vport[0].rss_hash_key; + u8 hfunc = vport[0].rss_algo; u16 tc_offset[HCLGE_MAX_TC_NUM]; - u8 rss_key[HCLGE_RSS_KEY_SIZE]; u16 tc_valid[HCLGE_MAX_TC_NUM]; u16 tc_size[HCLGE_MAX_TC_NUM]; - u32 *rss_indir = NULL; - u16 rss_size = 0, roundup_size; - const u8 *key; - int i, ret, j; - - rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); - if (!rss_indir) - return -ENOMEM; - - /* Get default RSS key */ - netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE); - - /* Initialize RSS indirect table for each vport */ - for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { - for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { - vport[j].rss_indirection_tbl[i] = - i % vport[j].alloc_rss_size; - - /* vport 0 is for PF */ - if (j != 0) - continue; + u16 roundup_size; + int i, ret; - rss_size = vport[j].alloc_rss_size; - rss_indir[i] = vport[j].rss_indirection_tbl[i]; - } - } ret = hclge_set_rss_indir_table(hdev, rss_indir); if (ret) - goto err; + return ret; - key = rss_key; ret = hclge_set_rss_algo_key(hdev, hfunc, key); if (ret) - goto err; + return ret; ret = hclge_set_rss_input_tuple(hdev); if (ret) - goto err; + return ret; /* Each TC have the same queue size, and tc_size set to hardware is * the log2 of roundup power of two of rss_size, the acutal queue @@ -3407,8 +3429,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev) dev_err(&hdev->pdev->dev, "Configure rss tc size failed, invalid TC_SIZE = %d\n", rss_size); - ret = -EINVAL; - goto err; + return -EINVAL; } roundup_size = roundup_pow_of_two(rss_size); @@ -3425,12 +3446,50 @@ int hclge_rss_init_hw(struct hclge_dev *hdev) tc_offset[i] = rss_size * i; } - ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); +} -err: - kfree(rss_indir); +void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int i, j; - return ret; + for (j = 0; j < hdev->num_vmdq_vport + 1; j++) 
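The RSS rework above (hclge_set_rss(), hclge_set_rss_tuple(), hclge_get_rss_tuple()) replaces read-it-back-from-firmware with a shadow copy kept in the vport (rss_algo, rss_hash_key, rss_tuple_sets, rss_indirection_tbl), and commits the shadow only after the command succeeds, so a failed command never leaves the cache lying. The pattern reduced to its essentials; hw_set_key() is a stand-in for hclge_set_rss_algo_key():

#include <stdint.h>
#include <string.h>

#define RSS_KEY_SIZE 40  /* mirrors HCLGE_RSS_KEY_SIZE */

struct vport_rss {
	uint8_t hash_key[RSS_KEY_SIZE];
	uint8_t algo;
};

static int set_rss_key(struct vport_rss *v,
		       int (*hw_set_key)(uint8_t algo, const uint8_t *key),
		       uint8_t algo, const uint8_t *key)
{
	int ret = hw_set_key(algo, key);  /* push to hardware first */

	if (ret)
		return ret;               /* shadow stays truthful */
	memcpy(v->hash_key, key, RSS_KEY_SIZE);
	v->algo = algo;
	return 0;
}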
{ + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + vport[j].rss_indirection_tbl[i] = + i % vport[j].alloc_rss_size; + } +} + +static void hclge_rss_init_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int i; + + netdev_rss_key_fill(vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + vport[i].rss_tuple_sets.ipv4_tcp_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv4_udp_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv4_sctp_en = + HCLGE_RSS_INPUT_TUPLE_SCTP; + vport[i].rss_tuple_sets.ipv4_fragment_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv6_tcp_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv6_udp_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + vport[i].rss_tuple_sets.ipv6_sctp_en = + HCLGE_RSS_INPUT_TUPLE_SCTP; + vport[i].rss_tuple_sets.ipv6_fragment_en = + HCLGE_RSS_INPUT_TUPLE_OTHER; + + vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + } + + hclge_rss_indir_init_cfg(hdev); } int hclge_bind_ring_with_vector(struct hclge_vport *vport, @@ -3533,18 +3592,13 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, } ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); - if (ret) { + if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail. vectorid=%d, ret =%d\n", vector_id, ret); - return ret; - } - - /* Free this MSIX or MSI vector */ - hclge_free_vector(hdev, vector_id); - return 0; + return ret; } int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, @@ -3717,20 +3771,11 @@ static int hclge_ae_start(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int i, queue_id, ret; + int i, ret; - for (i = 0; i < vport->alloc_tqps; i++) { - /* todo clear interrupt */ - /* ring enable */ - queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } + for (i = 0; i < vport->alloc_tqps; i++) + hclge_tqp_enable(hdev, i, 0, true); - hclge_tqp_enable(hdev, queue_id, 0, true); - } /* mac enable */ hclge_cfg_mac_mode(hdev, true); clear_bit(HCLGE_STATE_DOWN, &hdev->state); @@ -3750,19 +3795,11 @@ static void hclge_ae_stop(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int i, queue_id; + int i; - for (i = 0; i < vport->alloc_tqps; i++) { - /* Ring disable */ - queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } + for (i = 0; i < vport->alloc_tqps; i++) + hclge_tqp_enable(hdev, i, 0, false); - hclge_tqp_enable(hdev, queue_id, 0, false); - } /* Mac disable */ hclge_cfg_mac_mode(hdev, false); @@ -3770,6 +3807,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle) /* reset tqp stats */ hclge_reset_tqp_stats(handle); + del_timer_sync(&hdev->service_timer); + cancel_work_sync(&hdev->service_task); + hclge_update_link_status(hdev); } static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, @@ -3790,11 +3830,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if ((!resp_code) || (resp_code == 1)) { return_status = 0; } else if (resp_code == 2) { - return_status = -EIO; + return_status = -ENOSPC; dev_err(&hdev->pdev->dev, "add mac addr failed for uc_overflow.\n"); } else if (resp_code == 3) { - return_status = -EIO; + return_status = -ENOSPC; 
dev_err(&hdev->pdev->dev, "add mac addr failed for mc_overflow.\n"); } else { @@ -3806,7 +3846,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if (!resp_code) { return_status = 0; } else if (resp_code == 1) { - return_status = -EIO; + return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "remove mac addr failed for miss.\n"); } else { @@ -3818,7 +3858,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if (!resp_code) { return_status = 0; } else if (resp_code == 1) { - return_status = -EIO; + return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "lookup mac addr failed for miss.\n"); } else { @@ -3827,7 +3867,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, resp_code); } } else { - return_status = -EIO; + return_status = -EINVAL; dev_err(&hdev->pdev->dev, "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", op); @@ -4118,8 +4158,9 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, { struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; - enum hclge_cmd_status status; + struct hclge_desc desc; u16 egress_port = 0; + int ret; /* mac addr check */ if (is_zero_ether_addr(addr) || @@ -4151,9 +4192,23 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, hclge_prepare_mac_addr(&req, addr); - status = hclge_add_mac_vlan_tbl(vport, &req, NULL); + /* Lookup the mac address in the mac_vlan table, and add + * it if the entry is inexistent. Repeated unicast entry + * is not allowed in the mac vlan table. + */ + ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); + if (ret == -ENOENT) + return hclge_add_mac_vlan_tbl(vport, &req, NULL); + + /* check if we just hit the duplicate */ + if (!ret) + ret = -EINVAL; - return status; + dev_err(&hdev->pdev->dev, + "PF failed to add unicast entry(%pM) in the MAC table\n", + addr); + + return ret; } static int hclge_rm_uc_addr(struct hnae3_handle *handle, @@ -4169,7 +4224,7 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, { struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; - enum hclge_cmd_status status; + int ret; /* mac addr check */ if (is_zero_ether_addr(addr) || @@ -4185,9 +4240,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); - status = hclge_remove_mac_vlan_tbl(vport, &req); + ret = hclge_remove_mac_vlan_tbl(vport, &req); - return status; + return ret; } static int hclge_add_mc_addr(struct hnae3_handle *handle, @@ -4392,7 +4447,8 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) ether_addr_copy(p, hdev->hw.mac.mac_addr); } -static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) +static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, + bool is_first) { const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); @@ -4409,11 +4465,9 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) return -EINVAL; } - ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); - if (ret) + if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) dev_warn(&hdev->pdev->dev, - "remove old uc mac address fail, ret =%d.\n", - ret); + "remove old uc mac address fail.\n"); ret = hclge_add_uc_addr(handle, new_addr); if (ret) { @@ -4421,17 +4475,15 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) "add uc mac address fail, ret =%d.\n", 
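hclge_get_mac_vlan_cmd_status() above stops collapsing everything into -EIO: table overflow becomes -ENOSPC, a missed entry becomes -ENOENT, an unknown opcode -EINVAL. That is what lets hclge_add_uc_addr_common() probe with hclge_lookup_mac_vlan_tbl() first, add only on -ENOENT, and treat lookup success as a duplicate. A sketch of the mapping; response codes not shown in the hunk are assumed to keep their old -EIO:

#include <errno.h>
#include <stdbool.h>

static int mac_vlan_status(bool is_add, int resp_code)
{
	if (is_add) {
		if (resp_code == 0 || resp_code == 1)
			return 0;
		if (resp_code == 2 || resp_code == 3)
			return -ENOSPC;  /* uc/mc table overflow */
		return -EIO;
	}
	/* remove and lookup share the miss semantics */
	if (resp_code == 0)
		return 0;
	if (resp_code == 1)
		return -ENOENT;          /* entry not in the table */
	return -EIO;
}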
ret); - ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr); - if (ret) { + if (!is_first && + hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) dev_err(&hdev->pdev->dev, - "restore uc mac address fail, ret =%d.\n", - ret); - } + "restore uc mac address fail.\n"); return -EIO; } - ret = hclge_mac_pause_addr_cfg(hdev, new_addr); + ret = hclge_pause_addr_cfg(hdev, new_addr); if (ret) { dev_err(&hdev->pdev->dev, "configure mac pause address fail, ret =%d.\n", @@ -4771,11 +4823,9 @@ static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) return hclge_set_vlan_rx_offload_cfg(vport); } -static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) { - struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_config_max_frm_size_cmd *req; - struct hclge_dev *hdev = vport->back; struct hclge_desc desc; int max_frm_size; int ret; @@ -4804,6 +4854,27 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) return 0; } +static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_set_mac_mtu(hdev, new_mtu); + if (ret) { + dev_err(&hdev->pdev->dev, + "Change mtu fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_buffer_alloc(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "Allocate buffer fail, ret =%d\n", ret); + + return ret; +} + static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, bool enable) { @@ -4848,21 +4919,36 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } +static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, + u16 queue_id) +{ + struct hnae3_queue *queue; + struct hclge_tqp *tqp; + + queue = handle->kinfo.tqp[queue_id]; + tqp = container_of(queue, struct hclge_tqp, q); + + return tqp->index; +} + void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; int reset_try_times = 0; int reset_status; + u16 queue_gid; int ret; + queue_gid = hclge_covert_handle_qid_global(handle, queue_id); + ret = hclge_tqp_enable(hdev, queue_id, 0, false); if (ret) { dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); return; } - ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true); + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); if (ret) { dev_warn(&hdev->pdev->dev, "Send reset tqp cmd fail, ret = %d\n", ret); @@ -4873,7 +4959,7 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { /* Wait for tqp hw reset */ msleep(20); - reset_status = hclge_get_reset_status(hdev, queue_id); + reset_status = hclge_get_reset_status(hdev, queue_gid); if (reset_status) break; } @@ -4883,7 +4969,7 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) return; } - ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false); + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); if (ret) { dev_warn(&hdev->pdev->dev, "Deassert the soft reset fail, ret = %d\n", ret); @@ -4891,6 +4977,43 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) } } +void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) +{ + struct hclge_dev *hdev = vport->back; + int reset_try_times = 0; + int reset_status; + u16 queue_gid; + int ret; + + queue_gid = 
hclge_covert_handle_qid_global(&vport->nic, queue_id); + + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); + if (ret) { + dev_warn(&hdev->pdev->dev, + "Send reset tqp cmd fail, ret = %d\n", ret); + return; + } + + reset_try_times = 0; + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { + /* Wait for tqp hw reset */ + msleep(20); + reset_status = hclge_get_reset_status(hdev, queue_gid); + if (reset_status) + break; + } + + if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { + dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); + return; + } + + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); + if (ret) + dev_warn(&hdev->pdev->dev, + "Deassert the soft reset fail, ret = %d\n", ret); +} + static u32 hclge_get_fw_version(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -5376,11 +5499,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); return ret; } - ret = hclge_buffer_alloc(hdev); - if (ret) { - dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); - return ret; - } ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); if (ret) { @@ -5400,6 +5518,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + hclge_rss_init_cfg(hdev); ret = hclge_rss_init_hw(hdev); if (ret) { dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); @@ -5486,12 +5605,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_buffer_alloc(hdev); - if (ret) { - dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); - return ret; - } - ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); if (ret) { dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); @@ -5504,9 +5617,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_tm_schd_init(hdev); + ret = hclge_tm_init_hw(hdev); if (ret) { - dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); + dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); return ret; } @@ -5997,6 +6110,42 @@ static int hclge_update_led_status(struct hclge_dev *hdev) HCLGE_LED_NO_CHANGE); } +static void hclge_get_link_mode(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + unsigned int idx = 0; + + for (; idx < size; idx++) { + supported[idx] = hdev->hw.mac.supported[idx]; + advertising[idx] = hdev->hw.mac.advertising[idx]; + } +} + +static void hclge_get_port_type(struct hnae3_handle *handle, + u8 *port_type) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u8 media_type = hdev->hw.mac.media_type; + + switch (media_type) { + case HNAE3_MEDIA_TYPE_FIBER: + *port_type = PORT_FIBRE; + break; + case HNAE3_MEDIA_TYPE_COPPER: + *port_type = PORT_TP; + break; + case HNAE3_MEDIA_TYPE_UNKNOWN: + default: + *port_type = PORT_OTHER; + break; + } +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -6005,6 +6154,7 @@ static const struct hnae3_ae_ops hclge_ops = { .map_ring_to_vector = hclge_map_ring_to_vector, .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, .get_vector = hclge_get_vector, + .put_vector = hclge_put_vector, .set_promisc_mode = hclge_set_promisc_mode, .set_loopback = hclge_set_loopback, .start = 
hclge_ae_start, @@ -6051,6 +6201,8 @@ static const struct hnae3_ae_ops hclge_ops = { .get_regs_len = hclge_get_regs_len, .get_regs = hclge_get_regs, .set_led_id = hclge_set_led_id, + .get_link_mode = hclge_get_link_mode, + .get_port_type = hclge_get_port_type, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index d99a76a9557c..0f4157e71282 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -106,6 +106,12 @@ #define HCLGE_MAC_MIN_FRAME 64 #define HCLGE_MAC_MAX_FRAME 9728 +#define HCLGE_SUPPORT_1G_BIT BIT(0) +#define HCLGE_SUPPORT_10G_BIT BIT(1) +#define HCLGE_SUPPORT_25G_BIT BIT(2) +#define HCLGE_SUPPORT_50G_BIT BIT(3) +#define HCLGE_SUPPORT_100G_BIT BIT(4) + enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, HCLGE_STATE_DOWN, @@ -170,6 +176,8 @@ struct hclge_mac { struct phy_device *phydev; struct mii_bus *mdio_bus; phy_interface_t phy_if; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); }; struct hclge_hw { @@ -236,6 +244,7 @@ struct hclge_cfg { u8 mac_addr[ETH_ALEN]; u8 default_speed; u32 numa_node_map; + u8 speed_ability; }; struct hclge_tm_info { @@ -573,12 +582,27 @@ struct hclge_rx_vtag_cfg { bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */ }; +struct hclge_rss_tuple_cfg { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; +}; + struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ /* User configured lookup table entries */ u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; + int rss_algo; /* User configured hash algorithm */ + /* User configured rss tuple sets */ + struct hclge_rss_tuple_cfg rss_tuple_sets; + u16 alloc_rss_size; u16 qs_offset; @@ -627,8 +651,11 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); +void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev); void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); +void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id); int hclge_cfg_flowctrl(struct hclge_dev *hdev); +int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f38fc5ce9f51..39013334a613 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -79,6 +79,18 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return status; } +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) +{ + u8 msg_data[2]; + u8 dest_vfid; + + dest_vfid = (u8)vport->vport_id; + + /* send this requested info to VF */ + return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + HCLGE_MBX_ASSERTING_RESET, dest_vfid); +} + static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) { struct hnae3_ring_chain_node *chain_tmp, *chain; @@ -105,14 +117,17 @@ static int hclge_get_ring_chain_from_mbx( struct hnae3_ring_chain_node *ring_chain, struct hclge_vport *vport) { -#define HCLGE_RING_NODE_VARIABLE_NUM 3 
-#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM 3 struct hnae3_ring_chain_node *cur_chain, *new_chain; int ring_num; int i; ring_num = req->msg[2]; + if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM - + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / + HCLGE_MBX_RING_NODE_VARIABLE_NUM)) + return -ENOMEM; + hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); ring_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); @@ -128,18 +143,18 @@ static int hclge_get_ring_chain_from_mbx( goto err; hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, - req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i + - HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]); + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); new_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp - [req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i + - HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]); + [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]); hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S, - req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i + - HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 2]); + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); cur_chain->next = new_chain; cur_chain = new_chain; @@ -196,6 +211,8 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, hclge_rm_uc_addr_common(vport, old_addr); status = hclge_add_uc_addr_common(vport, mac_addr); + if (status) + hclge_add_uc_addr_common(vport, old_addr); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) { status = hclge_add_uc_addr_common(vport, mac_addr); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { @@ -291,7 +308,7 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport, /* get the queue related info */ memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16)); - memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16)); + memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16)); memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16)); memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16)); @@ -304,27 +321,61 @@ static int hclge_get_link_info(struct hclge_vport *vport, { struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[2]; + u8 msg_data[8]; u8 dest_vfid; + u16 duplex; /* mac.link can only be 0 or 1 */ link_status = (u16)hdev->hw.mac.link; + duplex = hdev->hw.mac.duplex; memcpy(&msg_data[0], &link_status, sizeof(u16)); + memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); + memcpy(&msg_data[6], &duplex, sizeof(u16)); dest_vfid = mbx_req->mbx_src_vfid; /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); } -static void hclge_reset_vf_queue(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req) +static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) { u16 queue_id; memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id)); - hclge_reset_tqp(&vport->nic, queue_id); + hclge_reset_vf_queue(vport, queue_id); + + /* send response msg to VF after queue reset complete*/ + hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0); +} + +static void hclge_reset_vf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!", + mbx_req->mbx_src_vfid); + + /* Acknowledge VF that PF is now 
about to assert the reset for the VF. + * On receiving this message VF will get into pending state and will + * start polling for the hardware reset completion status. + */ + ret = hclge_inform_reset_assert_to_vf(vport); + if (ret) { + dev_err(&hdev->pdev->dev, + "PF fail(%d) to inform VF(%d) of reset, reset failed!\n", + ret, vport->vport_id); + return; + } + + dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n", + mbx_req->mbx_src_vfid); + /* reset this virtual function */ + hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid); } void hclge_mbx_handler(struct hclge_dev *hdev) @@ -333,11 +384,11 @@ struct hclge_mbx_vf_to_pf_cmd *req; struct hclge_vport *vport; struct hclge_desc *desc; - int ret; + int ret, flag; + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); /* handle all the mailbox requests in the queue */ - while (hnae_get_bit(crq->desc[crq->next_to_use].flag, - HCLGE_CMDQ_RX_OUTVLD_B)) { + while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) { desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; @@ -360,7 +411,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_SET_UNICAST: - ret = hclge_set_vf_uc_mac_addr(vport, req, false); + ret = hclge_set_vf_uc_mac_addr(vport, req, true); if (ret) dev_err(&hdev->pdev->dev, "PF fail(%d) to set VF UC MAC Addr\n", @@ -402,7 +453,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_QUEUE_RESET: - hclge_reset_vf_queue(vport, req); + hclge_mbx_reset_vf_queue(vport, req); + break; + case HCLGE_MBX_RESET: + hclge_reset_vf(vport, req); break; default: dev_err(&hdev->pdev->dev, @@ -410,7 +464,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev) req->msg[0]); break; } + crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 36bd79a77940..885f25cd7be4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -23,6 +23,9 @@ enum hclge_shaper_level { HCLGE_SHAPER_LVL_PF = 1, }; +#define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3 +#define HCLGE_TM_PFC_NUM_GET_PER_CMD 3 + #define HCLGE_SHAPER_BS_U_DEF 5 #define HCLGE_SHAPER_BS_S_DEF 20 @@ -112,6 +115,56 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, return 0; } +static int hclge_pfc_stats_get(struct hclge_dev *hdev, + enum hclge_opcode_type opcode, u64 *stats) +{ + struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM]; + int ret, i, j; + + if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT || + opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT)) + return -EINVAL; + + for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { + hclge_cmd_setup_basic_desc(&desc[i], opcode, true); + if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1)) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + } + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get pfc pause stats fail, ret = %d.\n", ret); + return ret; + } + + for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { + struct hclge_pfc_stats_cmd *pfc_stats = + (struct hclge_pfc_stats_cmd *)desc[i].data; + + for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) { + u32 index = i * 
HCLGE_TM_PFC_PKT_GET_CMD_NUM + j; + + if (index < HCLGE_MAX_TC_NUM) + stats[index] = + le64_to_cpu(pfc_stats->pkt_num[j]); + } + } + return 0; +} + +int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats) +{ + return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats); +} + +int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats) +{ + return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats); +} + int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) { struct hclge_desc desc; @@ -138,8 +191,8 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, - u8 pause_trans_gap, u16 pause_trans_time) +static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, + u8 pause_trans_gap, u16 pause_trans_time) { struct hclge_cfg_pause_param_cmd *pause_param; struct hclge_desc desc; @@ -155,7 +208,7 @@ static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, return hclge_cmd_send(&hdev->hw, &desc, 1); } -int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) +int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) { struct hclge_cfg_pause_param_cmd *pause_param; struct hclge_desc desc; @@ -174,7 +227,7 @@ int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) trans_gap = pause_param->pause_trans_gap; trans_time = le16_to_cpu(pause_param->pause_trans_time); - return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap, + return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time); } @@ -1096,11 +1149,11 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) return hclge_tm_schd_mode_hw(hdev); } -static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev) +static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; - return hclge_mac_pause_param_cfg(hdev, mac->mac_addr, + return hclge_pause_param_cfg(hdev, mac->mac_addr, HCLGE_DEFAULT_PAUSE_TRANS_GAP, HCLGE_DEFAULT_PAUSE_TRANS_TIME); } @@ -1151,13 +1204,12 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) int ret; u8 i; - if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { - ret = hclge_mac_pause_setup_hw(hdev); - if (ret) - return ret; + ret = hclge_pause_param_setup_hw(hdev); + if (ret) + return ret; - return hclge_mac_pause_param_setup_hw(hdev); - } + if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) + return hclge_mac_pause_setup_hw(hdev); /* Only DCB-supported dev supports qset back pressure and pfc cmd */ if (!hnae3_dev_dcb_supported(hdev)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 5401e7559437..2dbe177581e9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -109,6 +109,10 @@ struct hclge_cfg_pause_param_cmd { __le16 pause_trans_time; }; +struct hclge_pfc_stats_cmd { + __le64 pkt_num[3]; +}; + struct hclge_port_shapping_cmd { __le32 port_shapping_para; }; @@ -129,5 +133,7 @@ int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_map_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev); int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); -int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); +int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); +int hclge_pfc_rx_stats_get(struct 
hclge_dev *hdev, u64 *stats); +int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 85985e731311..1bbfe131b596 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -315,6 +315,12 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) goto err_csq; } + /* initialize the pointers of async rx queue of mailbox */ + hdev->arq.hdev = hdev; + hdev->arq.head = 0; + hdev->arq.tail = 0; + hdev->arq.count = 0; + /* get firmware version */ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 2caca9317f8c..621c6cbacf76 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -7,7 +7,7 @@ #include <linux/types.h> #include "hnae3.h" -#define HCLGEVF_CMDQ_TX_TIMEOUT 200 +#define HCLGEVF_CMDQ_TX_TIMEOUT 30000 #define HCLGEVF_CMDQ_RX_INVLD_B 0 #define HCLGEVF_CMDQ_RX_OUTVLD_B 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 0d89965f7928..2b8426412cc9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2,6 +2,7 @@ // Copyright (c) 2016-2017 Hisilicon Limited. #include <linux/etherdevice.h> +#include <net/rtnetlink.h> #include "hclgevf_cmd.h" #include "hclgevf_main.h" #include "hclge_mbx.h" @@ -9,6 +10,8 @@ #define HCLGEVF_NAME "hclgevf" +static int hclgevf_init_hdev(struct hclgevf_dev *hdev); +static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); static struct hnae3_ae_algo ae_algovf; static const struct pci_device_id ae_algovf_pci_tbl[] = { @@ -18,6 +21,8 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { {0, } }; +MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); + static inline struct hclgevf_dev *hclgevf_ae_get_hdev( struct hnae3_handle *handle) { @@ -206,6 +211,12 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) struct hclgevf_tqp *tqp; int i; + /* if this is an ongoing reset then we need to re-allocate the TQPs + * since we cannot assume we would get same number of TQPs back from PF + */ + if (hclgevf_dev_ongoing_reset(hdev)) + devm_kfree(&hdev->pdev->dev, hdev->htqp); + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, sizeof(struct hclgevf_tqp), GFP_KERNEL); if (!hdev->htqp) @@ -249,6 +260,12 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) new_tqps = kinfo->rss_size * kinfo->num_tc; kinfo->num_tqps = min(new_tqps, hdev->num_tqps); + /* if this is an ongoing reset then we need to re-allocate the hnae queues + * as well since the number of TQPs from PF might have changed.
+ */ + if (hclgevf_dev_ongoing_reset(hdev)) + devm_kfree(&hdev->pdev->dev, kinfo->tqp); + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) @@ -533,13 +550,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, int vector, struct hnae3_ring_chain_node *ring_chain) { -#define HCLGEVF_RING_NODE_VARIABLE_NUM 3 -#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hnae3_ring_chain_node *node; struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; - int i, vector_id; + int i = 0, vector_id; int status; u8 type; @@ -551,28 +566,33 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, return vector_id; } - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); - type = en ? - HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR; - req->msg[0] = type; - req->msg[1] = vector_id; /* vector_id should be id in VF */ - - i = 0; for (node = ring_chain; node; node = node->next) { - i++; - /* msg[2] is cause num */ - req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] = + int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + + HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; + + if (i == 0) { + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_MBX_VF_TO_PF, + false); + type = en ? + HCLGE_MBX_MAP_RING_TO_VECTOR : + HCLGE_MBX_UNMAP_RING_TO_VECTOR; + req->msg[0] = type; + req->msg[1] = vector_id; + } + + req->msg[idx_offset] = hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); - req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] = - node->tqp_index; - req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] = - hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S); - - if (i == (HCLGE_MBX_VF_MSG_DATA_NUM - - HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) / - HCLGEVF_RING_NODE_VARIABLE_NUM) { + req->msg[idx_offset + 1] = node->tqp_index; + req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S); + + i++; + if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / + HCLGE_MBX_RING_NODE_VARIABLE_NUM) || + !node->next) { req->msg[2] = i; status = hclgevf_cmd_send(&hdev->hw, &desc, 1); @@ -591,17 +611,6 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, } } - if (i > 0) { - req->msg[2] = i; - - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "Map TQP fail, status is %d.\n", status); - return status; - } - } - return 0; } @@ -627,13 +636,18 @@ static int hclgevf_unmap_ring_from_vector( } ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain); - if (ret) { + if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail. 
vector=%d, ret =%d\n", vector_id, ret); - return ret; - } + + return ret; +} + +static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); hclgevf_free_vector(hdev, vector); @@ -729,21 +743,25 @@ static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) ether_addr_copy(p, hdev->hw.mac.mac_addr); } -static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p) +static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, + bool is_first) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; u8 *new_mac_addr = (u8 *)p; u8 msg_data[ETH_ALEN * 2]; + u16 subcode; int status; ether_addr_copy(msg_data, new_mac_addr); ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); + subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD : + HCLGE_MBX_MAC_VLAN_UC_MODIFY; + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, - HCLGE_MBX_MAC_VLAN_UC_MODIFY, - msg_data, ETH_ALEN * 2, - false, NULL, 0); + subcode, msg_data, ETH_ALEN * 2, + true, NULL, 0); if (!status) ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); @@ -816,11 +834,149 @@ static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); u8 msg_data[2]; + int ret; memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); - hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false, - NULL, 0); + /* disable vf queue before sending queue reset msg to PF */ + ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); + if (ret) + return; + + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, + 2, true, NULL, 0); +} + +static int hclgevf_notify_client(struct hclgevf_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_client *client = hdev->nic_client; + struct hnae3_handle *handle = &hdev->nic; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + return client->ops->reset_notify(handle, type); +} + +static int hclgevf_reset_wait(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_RESET_WAIT_MS 500 +#define HCLGEVF_RESET_WAIT_CNT 20 + u32 val, cnt = 0; + + /* wait to check the hardware reset completion status */ + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); + while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) && + (cnt < HCLGEVF_RESET_WAIT_CNT)) { + msleep(HCLGEVF_RESET_WAIT_MS); + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); + cnt++; + } + + /* hardware completion status should be available by this time */ + if (cnt >= HCLGEVF_RESET_WAIT_CNT) { + dev_warn(&hdev->pdev->dev, + "couldn't get reset done status from h/w, timeout!\n"); + return -EBUSY; + } + + /* we will wait a bit more to let the reset of the stack complete. This + * might happen in case reset assertion was made by PF. Yes, this also + * means we might end up waiting a bit more even for VF reset.
+ */ + msleep(5000); + + return 0; +} + +static int hclgevf_reset_stack(struct hclgevf_dev *hdev) +{ + int ret; + + /* uninitialize the nic client */ + hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + + /* re-initialize the hclge device */ + ret = hclgevf_init_hdev(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "hclge device re-init failed, VF is disabled!\n"); + return ret; + } + + /* bring up the nic client again */ + hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + + return 0; +} + +static int hclgevf_reset(struct hclgevf_dev *hdev) +{ + int ret; + + rtnl_lock(); + + /* bring down the nic to stop any ongoing TX/RX */ + hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + + /* check if VF could successfully fetch the hardware reset completion + * status from the hardware + */ + ret = hclgevf_reset_wait(hdev); + if (ret) { + /* can't do much in this situation, will disable VF */ + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to fetch H/W reset completion status\n", + ret); + + dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); + hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + + rtnl_unlock(); + return ret; + } + + /* now, re-initialize the nic client and ae device*/ + ret = hclgevf_reset_stack(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); + + /* bring up the nic to enable TX/RX again */ + hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + + rtnl_unlock(); + + return ret; +} + +static int hclgevf_do_reset(struct hclgevf_dev *hdev) +{ + int status; + u8 respmsg; + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, + 0, false, &respmsg, sizeof(u8)); + if (status) + dev_err(&hdev->pdev->dev, + "VF reset request to PF failed(=%d)\n", status); + + return status; +} + +static void hclgevf_reset_event(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); + + handle->reset_level = HNAE3_VF_RESET; + + /* reset of this VF requested */ + set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); + hclgevf_reset_task_schedule(hdev); + + handle->last_reset_time = jiffies; } static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) @@ -845,10 +1001,22 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) hdev->num_msi_used += 1; } -static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) +void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) +{ + if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && + !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { + set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); + schedule_work(&hdev->rst_service_task); + } +} + +void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) { - if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) + if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && + !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { + set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); schedule_work(&hdev->mbx_service_task); + } } static void hclgevf_task_schedule(struct hclgevf_dev *hdev) @@ -858,6 +1026,16 @@ static void hclgevf_task_schedule(struct hclgevf_dev *hdev) schedule_work(&hdev->service_task); } +static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) +{ + /* if we have any pending mailbox event then schedule the mbx task */ + if (hdev->mbx_event_pending) + hclgevf_mbx_task_schedule(hdev); + + if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) + hclgevf_reset_task_schedule(hdev); +} + 
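Editor's note: the VF reset path above is essentially a bounded poll; hclgevf_reset_wait() re-reads the function-reset status register at a fixed interval and gives up after HCLGEVF_RESET_WAIT_CNT tries. Below is a minimal userspace sketch of that pattern, under the assumption that read_reset_status() is a hypothetical stand-in for hclgevf_read_dev() testing HCLGEVF_FUN_RST_ING_B; the constants mirror the driver's but the code is illustrative, not the driver's.

/* Bounded-poll sketch: poll a completion flag at a fixed interval,
 * give up after a bounded number of tries (as hclgevf_reset_wait does).
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RESET_WAIT_MS  500
#define RESET_WAIT_CNT 20

static bool read_reset_status(void)
{
	static int countdown = 3;	/* pretend hw finishes after 3 polls */
	return --countdown > 0;		/* true while reset is in progress */
}

static int reset_wait(void)
{
	unsigned int cnt = 0;

	while (read_reset_status() && cnt < RESET_WAIT_CNT) {
		usleep(RESET_WAIT_MS * 1000);	/* userspace msleep() stand-in */
		cnt++;
	}

	if (cnt >= RESET_WAIT_CNT) {
		fprintf(stderr, "couldn't get reset done status, timeout!\n");
		return -1;	/* the driver returns -EBUSY here */
	}
	return 0;
}

int main(void)
{
	return reset_wait() ? 1 : 0;
}

The bound matters: because the VF cannot trust its command queue while the PF is resetting it, a poll that never times out could hang the reset service task forever.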
static void hclgevf_service_timer(struct timer_list *t) { struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); @@ -867,6 +1045,75 @@ static void hclgevf_service_timer(struct timer_list *t) hclgevf_task_schedule(hdev); } +static void hclgevf_reset_service_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev = + container_of(work, struct hclgevf_dev, rst_service_task); + int ret; + + if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + return; + + clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); + + if (test_and_clear_bit(HCLGEVF_RESET_PENDING, + &hdev->reset_state)) { + /* PF has intimated that it is about to reset the hardware. + * We now have to poll & check if hardware has actually completed + * the reset sequence. On hardware reset completion, VF needs to + * reset the client and ae device. + */ + hdev->reset_attempts = 0; + + ret = hclgevf_reset(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); + } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, + &hdev->reset_state)) { + /* we could be here when either of the below happens: + * 1. reset was initiated due to a watchdog timeout caused by + * a. IMP was earlier reset and our TX got choked down, + * which resulted in the watchdog reacting and inducing VF + * reset. This also means our cmdq would be unreliable. + * b. problem in TX due to some other lower layer (for example, + * link layer not functioning properly etc.) + * 2. VF reset might have been initiated due to some config + * change. + * + * NOTE: There's no clear way to detect the above cases other than + * to react to the response of PF for this reset request. PF will ack + * the 1b and 2 cases but we will not get any intimation about 1a + * from PF as cmdq would be in unreliable state i.e. mailbox + * communication between PF and VF would be broken. + */ + + /* if we are never getting into pending state it means either: + * 1. PF is not receiving our request which could be due to IMP + * reset + * 2. PF is screwed + * We cannot do much for 2 but to check first we can try resetting + * our PCIe + stack and see if it alleviates the problem.
+ */ + if (hdev->reset_attempts > 3) { + /* prepare for full reset of stack + pcie interface */ + hdev->nic.reset_level = HNAE3_VF_FULL_RESET; + + /* "defer" schedule the reset task again */ + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + } else { + hdev->reset_attempts++; + + /* request PF for resetting this VF via mailbox */ + ret = hclgevf_do_reset(hdev); + if (ret) + dev_warn(&hdev->pdev->dev, + "VF rst fail, stack will call\n"); + } + } + + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); +} + static void hclgevf_mailbox_service_task(struct work_struct *work) { struct hclgevf_dev *hdev; @@ -878,7 +1125,7 @@ static void hclgevf_mailbox_service_task(struct work_struct *work) clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); - hclgevf_mbx_handler(hdev); + hclgevf_mbx_async_handler(hdev); clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); } @@ -894,6 +1141,8 @@ static void hclgevf_service_task(struct work_struct *work) */ hclgevf_request_link_info(hdev); + hclgevf_deferred_task_schedule(hdev); + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); } @@ -936,8 +1185,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) if (!hclgevf_check_event_cause(hdev, &clearval)) goto skip_sched; - /* schedule the VF mailbox service task, if not already scheduled */ - hclgevf_mbx_task_schedule(hdev); + hclgevf_mbx_handler(hdev); hclgevf_clear_event_cause(hdev, clearval); @@ -959,6 +1207,22 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) return hclgevf_get_tc_info(hdev); } +static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + struct hclgevf_dev *hdev = ae_dev->priv; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + ae_dev->priv = hdev; + + return 0; +} + static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) { struct hnae3_handle *roce = &hdev->roce; @@ -1057,10 +1321,17 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); + del_timer_sync(&hdev->service_timer); + cancel_work_sync(&hdev->service_task); + hclgevf_update_link_status(hdev, 0); } static void hclgevf_state_init(struct hclgevf_dev *hdev) { + /* if this is on going reset then skip this initialization */ + if (hclgevf_dev_ongoing_reset(hdev)) + return; + /* setup tasks for the MBX */ INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); @@ -1072,6 +1343,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev) INIT_WORK(&hdev->service_task, hclgevf_service_task); clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); + INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); + mutex_init(&hdev->mbx_resp.mbx_mutex); /* bring the device down */ @@ -1088,6 +1361,8 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev) cancel_work_sync(&hdev->service_task); if (hdev->mbx_service_task.func) cancel_work_sync(&hdev->mbx_service_task); + if (hdev->rst_service_task.func) + cancel_work_sync(&hdev->rst_service_task); mutex_destroy(&hdev->mbx_resp.mbx_mutex); } @@ -1098,6 +1373,10 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) int vectors; int i; + /* if this is on going reset then skip this initialization */ + if (hclgevf_dev_ongoing_reset(hdev)) + return 0; + hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM; vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, @@ -1148,6 +1427,10 @@ static int 
hclgevf_misc_irq_init(struct hclgevf_dev *hdev) { int ret = 0; + /* if this is on going reset then skip this initialization */ + if (hclgevf_dev_ongoing_reset(hdev)) + return 0; + hclgevf_get_misc_vector(hdev); ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, @@ -1258,6 +1541,14 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) struct hclgevf_hw *hw; int ret; + /* check if we need to skip initialization of pci. This will happen if + * device is undergoing VF reset. Otherwise, we would need to + * re-initialize pci interface again i.e. when device is not going + * through *any* reset or actually undergoing full reset. + */ + if (hclgevf_dev_ongoing_reset(hdev)) + return 0; + ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); @@ -1309,19 +1600,16 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) pci_set_drvdata(pdev, NULL); } -static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) +static int hclgevf_init_hdev(struct hclgevf_dev *hdev) { - struct pci_dev *pdev = ae_dev->pdev; - struct hclgevf_dev *hdev; + struct pci_dev *pdev = hdev->pdev; int ret; - hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); - if (!hdev) - return -ENOMEM; - - hdev->pdev = pdev; - hdev->ae_dev = ae_dev; - ae_dev->priv = hdev; + /* check if device is undergoing full reset (i.e. pcie as well) */ + if (hclgevf_dev_ongoing_full_reset(hdev)) { + dev_warn(&pdev->dev, "device is undergoing full reset\n"); + hclgevf_uninit_hdev(hdev); + } ret = hclgevf_pci_init(hdev); if (ret) { @@ -1406,15 +1694,38 @@ err_irq_init: return ret; } -static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) { - struct hclgevf_dev *hdev = ae_dev->priv; - hclgevf_cmd_uninit(hdev); hclgevf_misc_irq_uninit(hdev); hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); hclgevf_pci_uninit(hdev); +} + +static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + int ret; + + ret = hclgevf_alloc_hdev(ae_dev); + if (ret) { + dev_err(&pdev->dev, "hclge device allocation failed\n"); + return ret; + } + + ret = hclgevf_init_hdev(ae_dev->priv); + if (ret) + dev_err(&pdev->dev, "hclge device initialization failed\n"); + + return ret; +} + +static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + hclgevf_uninit_hdev(hdev); ae_dev->priv = NULL; } @@ -1447,6 +1758,43 @@ static void hclgevf_get_channels(struct hnae3_handle *handle, ch->combined_count = hdev->num_tqps; } +static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, + u16 *free_tqps, u16 *max_rss_size) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + *free_tqps = 0; + *max_rss_size = hdev->rss_size_max; +} + +static int hclgevf_get_status(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->hw.mac.link; +} + +static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, + u8 *duplex) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + if (speed) + *speed = hdev->hw.mac.speed; + if (duplex) + *duplex = hdev->hw.mac.duplex; + if (auto_neg) + *auto_neg = AUTONEG_DISABLE; +} + +void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, + u8 duplex) +{ + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, 
.uninit_ae_dev = hclgevf_uninit_ae_dev, @@ -1457,6 +1805,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .map_ring_to_vector = hclgevf_map_ring_to_vector, .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, .get_vector = hclgevf_get_vector, + .put_vector = hclgevf_put_vector, .reset_queue = hclgevf_reset_tqp, .set_promisc_mode = hclgevf_set_promisc_mode, .get_mac_addr = hclgevf_get_mac_addr, @@ -1476,7 +1825,11 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_tc_size = hclgevf_get_tc_size, .get_fw_version = hclgevf_get_fw_version, .set_vlan_filter = hclgevf_set_vlan_filter, + .reset_event = hclgevf_reset_event, .get_channels = hclgevf_get_channels, + .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, + .get_status = hclgevf_get_status, + .get_ksettings_an_result = hclgevf_get_ksettings_an_result, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index a63bee4a3674..a477a7c36bbd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -34,6 +34,9 @@ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 #define HCLGEVF_TQP_RESET_TRY_TIMES 10 +/* Reset related Registers */ +#define HCLGEVF_FUN_RST_ING 0x20C00 +#define HCLGEVF_FUN_RST_ING_B 0 #define HCLGEVF_RSS_IND_TBL_SIZE 512 #define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff @@ -52,6 +55,8 @@ enum hclgevf_states { HCLGEVF_STATE_DISABLED, /* task states */ HCLGEVF_STATE_SERVICE_SCHED, + HCLGEVF_STATE_RST_SERVICE_SCHED, + HCLGEVF_STATE_RST_HANDLING, HCLGEVF_STATE_MBX_SERVICE_SCHED, HCLGEVF_STATE_MBX_HANDLING, }; @@ -61,6 +66,8 @@ enum hclgevf_states { struct hclgevf_mac { u8 mac_addr[ETH_ALEN]; int link; + u8 duplex; + u32 speed; }; struct hclgevf_hw { @@ -120,6 +127,11 @@ struct hclgevf_dev { struct hclgevf_rss_cfg rss_cfg; unsigned long state; +#define HCLGEVF_RESET_REQUESTED 0 +#define HCLGEVF_RESET_PENDING 1 + unsigned long reset_state; /* requested, pending */ + u32 reset_attempts; + u32 fw_version; u16 num_tqps; /* num task queue pairs of this PF */ @@ -140,10 +152,13 @@ struct hclgevf_dev { int *vector_irq; bool accept_mta_mc; /* whether to accept mta filter multicast */ + bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ + struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ struct timer_list service_timer; struct work_struct service_task; + struct work_struct rst_service_task; struct work_struct mbx_service_task; struct hclgevf_tqp *htqp; @@ -156,9 +171,29 @@ struct hclgevf_dev { u32 flag; }; +static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev) +{ + return (hdev && + (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && + (hdev->nic.reset_level == HNAE3_VF_RESET)); +} + +static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev) +{ + return (hdev && + (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && + (hdev->nic.reset_level == HNAE3_VF_FULL_RESET)); +} + int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, const u8 *msg_data, u8 msg_len, bool need_resp, u8 *resp_data, u16 resp_len); void hclgevf_mbx_handler(struct hclgevf_dev *hdev); +void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev); + void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state); +void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, + u8 duplex); +void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); +void 
hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index e39cad285fa9..a28618428338 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -54,6 +54,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, mbx_resp = &hdev->mbx_resp; r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16); r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff); + + if (mbx_resp->resp_status) + return mbx_resp->resp_status; + if (resp_data) memcpy(resp_data, &mbx_resp->additional_info[0], resp_len); @@ -128,7 +132,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) struct hclge_mbx_pf_to_vf_cmd *req; struct hclgevf_cmq_ring *crq; struct hclgevf_desc *desc; - u16 link_status, flag; + u16 *msg_q; + u16 flag; u8 *temp; int i; @@ -140,6 +145,12 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; + /* synchronous messages are time-critical and need preferential + * treatment. Therefore, we need to acknowledge all the sync + * responses as quickly as possible so that waiting tasks do not + * time out and simultaneously queue the async messages for later + * processing in context of mailbox task i.e. the slow path. + */ switch (req->msg[0]) { case HCLGE_MBX_PF_VF_RESP: if (resp->received_resp) @@ -159,10 +170,31 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) } break; case HCLGE_MBX_LINK_STAT_CHANGE: - link_status = le16_to_cpu(req->msg[1]); + case HCLGE_MBX_ASSERTING_RESET: + /* set this mbx event as pending. This is required as we + * might lose an interrupt event when the mbx task is busy + * handling. This shall be cleared when the mbx task just + * enters handling state.
+ */ + hdev->mbx_event_pending = true; - /* update upper layer with new link link status */ - hclgevf_update_link_status(hdev, link_status); + /* we will drop the async msg if we find the ARQ full + * and continue with the next message + */ + if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) { + dev_warn(&hdev->pdev->dev, + "Async Q full, dropping msg(%d)\n", + req->msg[1]); + break; + } + + /* tail the async message in arq */ + msg_q = hdev->arq.msg_q[hdev->arq.tail]; + memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE); + hclge_mbx_tail_ptr_move_arq(hdev->arq); + hdev->arq.count++; + + hclgevf_mbx_task_schedule(hdev); break; default: @@ -171,6 +203,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) req->msg[0]); break; } + crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); } @@ -179,3 +212,57 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG, crq->next_to_use); } + +void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) +{ + u16 link_status; + u16 *msg_q; + u8 duplex; + u32 speed; + u32 tail; + + /* we can safely clear it now as we are at the start of the async message + * processing + */ + hdev->mbx_event_pending = false; + + tail = hdev->arq.tail; + + /* process all the async queue messages */ + while (tail != hdev->arq.head) { + msg_q = hdev->arq.msg_q[hdev->arq.head]; + + switch (msg_q[0]) { + case HCLGE_MBX_LINK_STAT_CHANGE: + link_status = le16_to_cpu(msg_q[1]); + memcpy(&speed, &msg_q[2], sizeof(speed)); + duplex = (u8)le16_to_cpu(msg_q[4]); + + /* update upper layer with new link status */ + hclgevf_update_link_status(hdev, link_status); + hclgevf_update_speed_duplex(hdev, speed, duplex); + + break; + case HCLGE_MBX_ASSERTING_RESET: + /* PF has asserted reset hence VF should go into pending + * state and poll for the hardware reset status till it + * has been completely reset. After this the stack should + * eventually be re-initialized. + */ + hdev->nic.reset_level = HNAE3_VF_RESET; + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + hclgevf_reset_task_schedule(hdev); + + break; + default: + dev_err(&hdev->pdev->dev, + "fetched unsupported(%d) message from arq\n", + msg_q[0]); + break; + } + + hclge_mbx_head_ptr_move_arq(hdev->arq); + hdev->arq.count--; + msg_q = hdev->arq.msg_q[hdev->arq.head]; + } +}
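Editor's note: the async mailbox path added above is a fixed-size producer/consumer ring: the interrupt-time handler tails messages into hdev->arq (dropping when full) while the mailbox service task drains from the head. A self-contained sketch of that ring discipline follows; the names and the 1024 x 8 geometry echo HCLGE_MBX_MAX_ARQ_MSG_NUM/HCLGE_MBX_MAX_ARQ_MSG_SIZE, but the code is illustrative rather than the driver's (in the driver the indices advance via the hclge_mbx_*_ptr_move_arq() macros).

/* Drop-on-full producer, head-chasing consumer, fixed-size message ring. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ARQ_MSG_SIZE 8      /* u16 words per message, as in the driver */
#define ARQ_MSG_NUM  1024   /* ring depth */

struct arq_ring {
	unsigned int head;	/* consumer index */
	unsigned int tail;	/* producer index */
	unsigned int count;	/* messages currently queued */
	unsigned short msg_q[ARQ_MSG_NUM][ARQ_MSG_SIZE];
};

/* producer side: the interrupt-time mailbox handler */
static bool arq_push(struct arq_ring *arq, const unsigned short *msg)
{
	if (arq->count >= ARQ_MSG_NUM)
		return false;	/* queue full: drop, as the driver does */

	memcpy(arq->msg_q[arq->tail], msg, sizeof(arq->msg_q[0]));
	arq->tail = (arq->tail + 1) % ARQ_MSG_NUM;	/* wrap on ring depth */
	arq->count++;
	return true;
}

/* consumer side: the mailbox service task */
static bool arq_pop(struct arq_ring *arq, unsigned short *msg)
{
	if (arq->head == arq->tail)
		return false;	/* empty */

	memcpy(msg, arq->msg_q[arq->head], sizeof(arq->msg_q[0]));
	arq->head = (arq->head + 1) % ARQ_MSG_NUM;
	arq->count--;
	return true;
}

int main(void)
{
	static struct arq_ring arq;	/* zeroed: head == tail == count == 0 */
	unsigned short in[ARQ_MSG_SIZE] = { 9 /* opcode */, 1 /* payload */ };
	unsigned short out[ARQ_MSG_SIZE];

	arq_push(&arq, in);
	while (arq_pop(&arq, out))
		printf("drained opcode %d\n", (int)out[0]);
	return 0;
}

Deferring the messages this way keeps the interrupt handler short: only the time-critical synchronous responses are acknowledged in IRQ context, and link-state and reset-assert notifications are replayed later from the ring in task context.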