path: root/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3_enet.c')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 408
1 file changed, 203 insertions(+), 205 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 25a73bb2e642..3554dca7a680 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
@@ -56,15 +50,16 @@ static const struct pci_device_id hns3_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
-static irqreturn_t hns3_irq_handle(int irq, void *dev)
+static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
- struct hns3_enet_tqp_vector *tqp_vector = dev;
+ struct hns3_enet_tqp_vector *tqp_vector = vector;
napi_schedule(&tqp_vector->napi);
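
The hunk above does two things: the 100G RDMA/DCB/PFC VF entry now carries HNAE3_DEV_SUPPORT_ROCE_DCB_BITS in its driver_data (matching the PF entries), and the irq handler's opaque pointer is renamed to say what it actually is. A minimal sketch of how driver_data capability bits are typically consumed at probe time — the probe body here is illustrative, not taken from this patch:

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
	{
		unsigned long caps = ent->driver_data;

		/* With this patch the test also succeeds for the
		 * 100G RDMA/DCB/PFC VF, not just the PF devices.
		 */
		if (caps & HNAE3_DEV_SUPPORT_ROCE_DCB_BITS)
			dev_info(&pdev->dev, "RoCE/DCB capable device\n");

		return 0;
	}
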
@@ -239,7 +234,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
- int ret;
+ int i, ret;
+
+ if (kinfo->num_tc <= 1) {
+ netdev_reset_tc(netdev);
+ } else {
+ ret = netdev_set_num_tc(netdev, kinfo->num_tc);
+ if (ret) {
+ netdev_err(netdev,
+ "netdev_set_num_tc fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!kinfo->tc_info[i].enable)
+ continue;
+
+ netdev_set_tc_queue(netdev,
+ kinfo->tc_info[i].tc,
+ kinfo->tc_info[i].tqp_count,
+ kinfo->tc_info[i].tqp_offset);
+ }
+ }
ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) {
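
The TC-to-queue mapping that used to live in hns3_setup_tc() and hns3_client_setup_tc() (both removed further down) is consolidated here, so every caller of hns3_nic_set_real_num_queue() gets a consistent mqprio view. A standalone sketch of the same netdev API with illustrative numbers:

	/* Sketch: expose 2 TCs of 4 queues each to the stack. */
	static int example_map_tcs(struct net_device *netdev)
	{
		int ret;

		ret = netdev_set_num_tc(netdev, 2);
		if (ret)
			return ret;

		/* TC 0 -> queues 0..3, TC 1 -> queues 4..7 */
		netdev_set_tc_queue(netdev, 0, 4, 0);
		netdev_set_tc_queue(netdev, 1, 4, 4);

		return netif_set_real_num_tx_queues(netdev, 8);
	}
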
@@ -312,7 +328,9 @@ out_start_err:
static int hns3_nic_net_open(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo;
+ int i, ret;
netif_carrier_off(netdev);
@@ -327,6 +345,12 @@ static int hns3_nic_net_open(struct net_device *netdev)
return ret;
}
+ kinfo = &h->kinfo;
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ netdev_set_prio_tc_map(netdev, i,
+ kinfo->prio_tc[i]);
+ }
+
priv->ae_handle->last_reset_time = jiffies;
return 0;
}
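
hns3_nic_net_open() now also refreshes the user-priority-to-TC map on every open (this loop was previously in hns3_client_setup_tc(), removed below). The underlying call, sketched with an illustrative 802.1p mapping:

	/* Sketch: priorities 0-3 -> TC 0, priorities 4-7 -> TC 1. */
	static void example_map_prios(struct net_device *netdev)
	{
		int prio;

		for (prio = 0; prio < 8; prio++)
			netdev_set_prio_tc_map(netdev, prio,
					       prio < 4 ? 0 : 1);
	}
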
@@ -493,8 +517,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* find the txbd field values */
*paylen = skb->len - hdr_len;
- hnae_set_bit(*type_cs_vlan_tso,
- HNS3_TXD_TSO_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso,
+ HNS3_TXD_TSO_B, 1);
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
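
From here on the patch is dominated by the mechanical hnae_* -> hnae3_* rename of the bit-field helpers. For orientation, these are plain mask-and-shift macros; a sketch consistent with how they are used in this file (the canonical definitions live in hnae3.h and may differ in detail):

	#define hnae3_set_field(origin, mask, shift, val) \
		do { \
			(origin) &= ~(mask); \
			(origin) |= ((val) << (shift)) & (mask); \
		} while (0)

	#define hnae3_set_bit(origin, shift, val) \
		hnae3_set_field(origin, 0x1 << (shift), shift, val)
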
@@ -586,21 +610,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute L2 header size for normal packet, defined in 2 Bytes */
l2_len = l3.hdr - skb->data;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
/* tunnel packet*/
if (skb->encapsulation) {
/* compute OL2 header size, defined in 2 Bytes */
ol2_len = l2_len;
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, ol2_len >> 1);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, ol2_len >> 1);
/* compute OL3 header size, defined in 4 Bytes */
ol3_len = l4.hdr - l3.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, ol3_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, ol3_len >> 2);
/* MAC in UDP, MAC in GRE (0x6558)*/
if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
@@ -609,16 +633,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute OL4 header size, defined in 4 Bytes. */
ol4_len = l2_hdr - l4.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, ol4_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+ ol4_len >> 2);
/* switch IP header ptr from outer to inner header */
l3.hdr = skb_inner_network_header(skb);
/* compute inner l2 header size, defined in 2 Bytes. */
l2_len = l3.hdr - l2_hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
} else {
/* skb packet types not supported by hardware,
* txbd len fild doesn't be filled.
@@ -634,22 +659,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute inner(/normal) L3 header size, defined in 4 Bytes */
l3_len = l4.hdr - l3.hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, l3_len >> 2);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, l3_len >> 2);
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, l4.tcp->doff);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S, l4.tcp->doff);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
break;
case IPPROTO_UDP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
break;
default:
/* skb packet types not supported by hardware,
@@ -703,32 +730,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
/* define outer network header type.*/
if (skb->protocol == htons(ETH_P_IP)) {
if (skb_is_gso(skb))
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
else
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
}
/* define tunnel type(OL4).*/
switch (l4_proto) {
case IPPROTO_UDP:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
break;
case IPPROTO_GRE:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
break;
default:
/* drop the skb tunnel packet if hardware don't support,
@@ -749,43 +778,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
}
if (l3.v4->version == 4) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
if (skb_is_gso(skb))
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
-
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
} else if (l3.v6->version == 6) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
}
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_TCP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
break;
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_UDP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_SCTP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
break;
default:
/* drop the skb tunnel packet if hardware don't support,
@@ -807,11 +836,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
/* Config bd buffer end */
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
- HNS3_TXD_BDTYPE_S, 0);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
+ HNS3_TXD_BDTYPE_S, 0);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -844,10 +873,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
*out_vtag = vlan_tag;
} else {
- hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
}
} else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -880,7 +909,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
- __be16 protocol;
u8 ol4_proto;
u8 il4_proto;
int ret;
@@ -909,7 +937,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
- protocol = skb->protocol;
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
@@ -1135,7 +1162,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
wmb(); /* Commit all data before submit */
- hnae_queue_xmit(ring->tqp, buf_num);
+ hnae3_queue_xmit(ring->tqp, buf_num);
return NETDEV_TX_OK;
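
hnae3_queue_xmit() is the TX doorbell that the preceding wmb() protects: the barrier orders the descriptor stores before the tail-register write that hands the BDs to hardware. A sketch of the pattern — the body and the register name are assumptions, not shown in this patch:

	/* Sketch: publish buf_num newly filled TX BDs. */
	static void example_kick_tx(struct hnae3_queue *tqp, int buf_num)
	{
		wmb();	/* descriptor writes first, doorbell second */
		writel(buf_num, tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
	}
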
@@ -1304,7 +1331,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
bool if_running;
- unsigned int i;
int ret;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
@@ -1328,24 +1354,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
goto out;
- if (tc <= 1) {
- netdev_reset_tc(netdev);
- } else {
- ret = netdev_set_num_tc(netdev, tc);
- if (ret)
- goto out;
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (!kinfo->tc_info[i].enable)
- continue;
-
- netdev_set_tc_queue(netdev,
- kinfo->tc_info[i].tc,
- kinfo->tc_info[i].tqp_count,
- kinfo->tc_info[i].tqp_offset);
- }
- }
-
ret = hns3_nic_set_real_num_queue(netdev);
out:
@@ -1665,6 +1673,9 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct pci_dev *pdev = h->pdev;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1698,12 +1709,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (pdev->revision != 0x20)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae_page_order(ring);
+ unsigned int order = hnae3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
@@ -1714,7 +1728,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae_page_size(ring);
+ cb->length = hnae3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
@@ -1780,33 +1794,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
hns3_free_buffers(ring);
- dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
int size = ring->desc_num * sizeof(ring->desc[0]);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
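
Switching the descriptor ring from kzalloc() + dma_map_single() to a coherent allocation removes the dma_mapping_error() unwind path and gives CPU and device a permanently consistent view of the ring, which suits a structure both sides update continuously. A minimal sketch of the pairing (dma_zalloc_coherent() was the zeroing variant of dma_alloc_coherent() in kernels of this era):

	/* Sketch: lifetime of a coherent descriptor ring. */
	static int example_ring_dma(struct device *dev,
				    struct hns3_enet_ring *ring)
	{
		int size = ring->desc_num * sizeof(ring->desc[0]);

		ring->desc = dma_zalloc_coherent(dev, size,
						 &ring->desc_dma_addr,
						 GFP_KERNEL);
		if (!ring->desc)
			return -ENOMEM;

		/* ... use the ring; hardware sees updates directly ... */

		dma_free_coherent(dev, size, ring->desc,
				  ring->desc_dma_addr);
		ring->desc = NULL;
		return 0;
	}
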
@@ -1887,7 +1895,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
- /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
+ /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
hns3_free_buffer_detach(ring, ring->next_to_clean);
ring_ptr_move_fw(ring, next_to_clean);
@@ -1917,7 +1925,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
if (is_ring_empty(ring) || head == ring->next_to_clean)
return true; /* no data to poll */
- if (!is_valid_clean_head(ring, head)) {
+ if (unlikely(!is_valid_clean_head(ring, head))) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
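
unlikely() here (and in the RX-path annotations below) is only a branch-prediction hint; in include/linux/compiler.h it boils down to:

	#define unlikely(x)	__builtin_expect(!!(x), 0)
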
@@ -2016,15 +2024,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
bool twobufs;
twobufs = ((PAGE_SIZE < 8192) &&
- hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+ hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
- truesize = hnae_buf_size(ring);
+ truesize = hnae3_buf_size(ring);
if (!twobufs)
- last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+ last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2076,13 +2084,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
/* check if hardware has done checksum */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
netdev_err(netdev, "L3/L4 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
@@ -2091,23 +2099,25 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
}
- l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
- HNS3_RXD_L3ID_S);
- l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
- HNS3_RXD_L4ID_S);
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
- ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+ ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+ HNS3_RXD_OL4ID_S);
switch (ol4_type) {
case HNS3_OL4_TYPE_MAC_IN_UDP:
case HNS3_OL4_TYPE_NVGRE:
skb->csum_level = 1;
+ /* fall through */
case HNS3_OL4_TYPE_NO_TUN:
/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
- if (l3_type == HNS3_L3_TYPE_IPV4 ||
- (l3_type == HNS3_L3_TYPE_IPV6 &&
- (l4_type == HNS3_L4_TYPE_UDP ||
- l4_type == HNS3_L4_TYPE_TCP ||
- l4_type == HNS3_L4_TYPE_SCTP)))
+ if ((l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) &&
+ (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP))
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
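
This hunk is a real logic fix, not just a rename: the old condition marked every IPv4 packet CHECKSUM_UNNECESSARY regardless of L4 type, and only gated IPv6 on UDP/TCP/SCTP. The regrouped parentheses require a supported L4 protocol for both address families. The corrected predicate, as a standalone sketch:

	/* Sketch: csum verified only for IPv4/IPv6 + UDP/TCP/SCTP. */
	static bool example_csum_complete(u32 l3_type, u32 l4_type)
	{
		return (l3_type == HNS3_L3_TYPE_IPV4 ||
			l3_type == HNS3_L3_TYPE_IPV6) &&
		       (l4_type == HNS3_L4_TYPE_UDP ||
			l4_type == HNS3_L4_TYPE_TCP ||
			l4_type == HNS3_L4_TYPE_SCTP);
	}
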
@@ -2135,8 +2145,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
- switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
- HNS3_RXD_STRP_TAGP_S)) {
+ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
break;
@@ -2174,7 +2184,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Check valid BD */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
return -EFAULT;
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2229,7 +2239,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
- while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2257,7 +2267,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
vlan_tag);
}
- if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
((u64 *)desc)[0], ((u64 *)desc)[1]);
u64_stats_update_begin(&ring->syncp);
@@ -2269,7 +2279,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
if (unlikely((!desc->rx.pkt_len) ||
- hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+ hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
netdev_err(netdev, "truncated pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.err_pkt_len++;
@@ -2279,7 +2289,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return -EFAULT;
}
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
netdev_err(netdev, "L2 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l2_err++;
@@ -2532,10 +2542,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
tx_ring = tqp_vector->tx_group.ring;
if (tx_ring) {
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2549,12 +2559,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(chain->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- HNAE3_RING_GL_TX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2564,10 +2574,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
if (!tx_ring && rx_ring) {
cur_chain->next = NULL;
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2579,10 +2589,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
cur_chain = chain;
@@ -2745,10 +2755,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
if (ret)
return ret;
- ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
- if (ret)
- return ret;
-
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
@@ -2809,7 +2815,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->io_base = q->io_base;
}
- hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+ hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
ring->tqp = q;
ring->desc = NULL;
@@ -2969,13 +2975,33 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
- hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
- hns3_buf_size2type(ring->buf_size));
hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
ring->desc_num / 8 - 1);
}
}
+static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ int i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
+ int j;
+
+ if (!tc_info->enable)
+ continue;
+
+ for (j = 0; j < tc_info->tqp_count; j++) {
+ struct hnae3_queue *q;
+
+ q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
+ hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
+ tc_info->tc);
+ }
+ }
+}
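
The new hns3_init_tx_ring_tc() programs each TX queue's TC register so the hardware scheduler agrees with the netdev mapping set up earlier; it is called from hns3_nic_reset_all_ring() (see the hunk near the end of this patch), so the mapping is restored after a reset.
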
+
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -3081,7 +3107,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->reset_level = HNAE3_NONE_RESET;
priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
@@ -3102,6 +3127,11 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ if (handle->flags & HNAE3_SUPPORT_VF)
+ handle->reset_level = HNAE3_VF_RESET;
+ else
+ handle->reset_level = HNAE3_FUNC_RESET;
+
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3208,7 +3238,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
struct net_device *ndev = kinfo->netdev;
bool if_running;
int ret;
- u8 i;
if (tc > HNAE3_MAX_TC)
return -EINVAL;
@@ -3218,10 +3247,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if_running = netif_running(ndev);
- ret = netdev_set_num_tc(ndev, tc);
- if (ret)
- return ret;
-
if (if_running) {
(void)hns3_nic_net_stop(ndev);
msleep(100);
@@ -3232,27 +3257,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if (ret)
goto err_out;
- if (tc <= 1) {
- netdev_reset_tc(ndev);
- goto out;
- }
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
-
- if (tc_info->enable)
- netdev_set_tc_queue(ndev,
- tc_info->tc,
- tc_info->tqp_count,
- tc_info->tqp_offset);
- }
-
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- netdev_set_prio_tc_map(ndev, i,
- kinfo->prio_tc[i]);
- }
-
-out:
ret = hns3_nic_set_real_num_queue(ndev);
err_out:
@@ -3409,6 +3413,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
rx_ring->next_to_use = 0;
}
+ hns3_init_tx_ring_tc(priv);
+
return 0;
}
@@ -3418,7 +3424,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
struct net_device *ndev = kinfo->netdev;
if (!netif_running(ndev))
- return -EIO;
+ return 0;
return hns3_nic_net_stop(ndev);
}
@@ -3458,10 +3464,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- ret = hns3_get_ring_config(priv);
- if (ret)
- return ret;
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
return ret;
@@ -3493,10 +3495,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
- hns3_put_ring_config(priv);
-
- priv->ring_data = NULL;
-
hns3_uninit_mac_addr(netdev);
return ret;