Diffstat (limited to 'drivers/net/ethernet/hisilicon')
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig                     |   10
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c                 |  142
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c              |    1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h            |    2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c                |   26
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h                |   27
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c           |   12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c         |    6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c            |  455
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h            |   27
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c         |   60
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c     |   70
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h     |   43
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c     |    2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c |   95
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c     |  799
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h     |   21
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c    | 1348
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h    |   62
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c     |   32
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c    |   15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c      |  170
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h      |    3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile        |    2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c   |   59
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h   |   14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c  |  286
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h  |    9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c   |    3
29 files changed, 2553 insertions, 1248 deletions
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index a0d780c14e60..3892a2062404 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -46,6 +46,16 @@ config HIP04_ETH
If you wish to compile a kernel for hardware with a Hisilicon HIP04 SoC and
want to use the internal ethernet, then you should answer Y to this.
+config HI13X1_GMAC
+ bool "Hisilicon HI13X1 Network Device Support"
+ depends on HIP04_ETH
+ help
+ If you wish to compile a kernel for hardware with a Hisilicon hi13x1_gmac
+ then you should answer Y to this. This makes the driver suitable for use
+ on certain boards such as the HI13X1.
+
+ If you are unsure, say N.
+
config HNS_MDIO
tristate
select PHYLIB
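
Since HI13X1_GMAC is a bool symbol, enabling it surfaces as a CONFIG_HI13X1_GMAC preprocessor macro rather than a separate module; the hip04_eth.c hunks below use it to pick per-SoC register offsets at compile time. A minimal illustration of the pattern (offsets copied from the patch):

#if defined(CONFIG_HI13X1_GMAC)
#define PPE_CFG_CPU_ADD_ADDR	0x6D0	/* HI13X1 register map */
#else
#define PPE_CFG_CPU_ADD_ADDR	0x580	/* original HIP04 map */
#endif
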
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index e1f2978506fd..625635771b83 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -16,6 +16,8 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#define SC_PPE_RESET_DREQ 0x026C
+
#define PPE_CFG_RX_ADDR 0x100
#define PPE_CFG_POOL_GRP 0x300
#define PPE_CFG_RX_BUF_SIZE 0x400
@@ -33,10 +35,23 @@
#define GE_MODE_CHANGE_REG 0x1b4
#define GE_RECV_CONTROL_REG 0x1e0
#define GE_STATION_MAC_ADDRESS 0x210
-#define PPE_CFG_CPU_ADD_ADDR 0x580
-#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
+
#define PPE_CFG_BUS_CTRL_REG 0x424
#define PPE_CFG_RX_CTRL_REG 0x428
+
+#if defined(CONFIG_HI13X1_GMAC)
+#define PPE_CFG_CPU_ADD_ADDR 0x6D0
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x500
+#define PPE_CFG_RX_PKT_MODE_REG 0x504
+#define PPE_CFG_QOS_VMID_GEN 0x520
+#define PPE_CFG_RX_PKT_INT 0x740
+#define PPE_INTEN 0x700
+#define PPE_INTSTS 0x708
+#define PPE_RINT 0x704
+#define PPE_CFG_STS_MODE 0x880
+#else
+#define PPE_CFG_CPU_ADD_ADDR 0x580
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
#define PPE_CFG_RX_PKT_MODE_REG 0x438
#define PPE_CFG_QOS_VMID_GEN 0x500
#define PPE_CFG_RX_PKT_INT 0x538
@@ -44,8 +59,12 @@
#define PPE_INTSTS 0x608
#define PPE_RINT 0x604
#define PPE_CFG_STS_MODE 0x700
+#endif /* CONFIG_HI13X1_GMAC */
+
#define PPE_HIS_RX_PKT_CNT 0x804
+#define RESET_DREQ_ALL 0xffffffff
+
/* REG_INTERRUPT */
#define RCV_INT BIT(10)
#define RCV_NOBUF BIT(8)
@@ -57,8 +76,15 @@
/* TX descriptor config */
#define TX_FREE_MEM BIT(0)
#define TX_READ_ALLOC_L3 BIT(1)
-#define TX_FINISH_CACHE_INV BIT(2)
+#if defined(CONFIG_HI13X1_GMAC)
+#define TX_CLEAR_WB BIT(7)
+#define TX_RELEASE_TO_PPE BIT(4)
+#define TX_FINISH_CACHE_INV BIT(6)
+#define TX_POOL_SHIFT 16
+#else
#define TX_CLEAR_WB BIT(4)
+#define TX_FINISH_CACHE_INV BIT(2)
+#endif
#define TX_L3_CHECKSUM BIT(5)
#define TX_LOOP_BACK BIT(11)
@@ -93,18 +119,35 @@
#define GE_RX_PORT_EN BIT(1)
#define GE_TX_PORT_EN BIT(2)
-#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
-
#define PPE_CFG_RX_PKT_ALIGN BIT(18)
-#define PPE_CFG_QOS_VMID_MODE BIT(14)
+
+#if defined(CONFIG_HI13X1_GMAC)
+#define PPE_CFG_QOS_VMID_GRP_SHIFT 4
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 7
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(0)
+#define PPE_CFG_QOS_VMID_MODE BIT(15)
+#define PPE_CFG_BUS_LOCAL_REL (BIT(9) | BIT(15) | BIT(19) | BIT(23))
+
+/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
+#define PPE_BUF_SIZE_SHIFT 6
+#define PPE_TX_BUF_HOLD BIT(31)
+#define CACHE_LINE_MASK 0x3F
+#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
+#define PPE_CFG_QOS_VMID_MODE BIT(14)
+#define PPE_CFG_BUS_LOCAL_REL BIT(14)
+
+/* buf unit size is 1, so the shift is 0 */
+#define PPE_BUF_SIZE_SHIFT 0
+#define PPE_TX_BUF_HOLD 0
+#endif /* CONFIG_HI13X1_GMAC */
#define PPE_CFG_RX_FIFO_FSFU BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT 16
#define PPE_CFG_RX_START_SHIFT 0
-#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
-#define PPE_CFG_BUS_LOCAL_REL BIT(14)
#define PPE_CFG_BUS_BIG_ENDIEN BIT(0)
#define RX_DESC_NUM 128
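
On HI13X1 the PPE addresses buffers in cache-line (64-byte) units, hence the PPE_BUF_SIZE_SHIFT of 6 and the CACHE_LINE_MASK above; the descriptor changes below split every DMA address into a shifted unit address, a line-aligned base, and an in-line offset. A standalone sketch of that arithmetic (values copied from the defines; the example address is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define PPE_BUF_SIZE_SHIFT	6	/* buf unit = 64-byte cache line */
#define CACHE_LINE_MASK		0x3Fu

int main(void)
{
	uint64_t phys = 0x40001234ULL;	/* example DMA address */

	/* value programmed into PPE_CFG_RX_ADDR / PPE_CFG_CPU_ADD_ADDR */
	printf("unit addr: 0x%llx\n",
	       (unsigned long long)(phys >> PPE_BUF_SIZE_SHIFT));
	/* TX descriptor: line-aligned base plus in-line offset */
	printf("send_addr: 0x%llx, data_offset: 0x%llx\n",
	       (unsigned long long)(phys & ~(uint64_t)CACHE_LINE_MASK),
	       (unsigned long long)(phys & CACHE_LINE_MASK));
	return 0;
}
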
@@ -128,26 +171,50 @@
#define HIP04_MIN_TX_COALESCE_FRAMES 100
struct tx_desc {
+#if defined(CONFIG_HI13X1_GMAC)
+ u32 reserved1[2];
+ u32 send_addr;
+ u16 send_size;
+ u16 data_offset;
+ u32 reserved2[7];
+ u32 cfg;
+ u32 wb_addr;
+ u32 reserved3[3];
+#else
u32 send_addr;
u32 send_size;
u32 next_addr;
u32 cfg;
u32 wb_addr;
+#endif
} __aligned(64);
struct rx_desc {
+#if defined(CONFIG_HI13X1_GMAC)
+ u32 reserved1[3];
+ u16 pkt_len;
+ u16 reserved_16;
+ u32 reserved2[6];
+ u32 pkt_err;
+ u32 reserved3[5];
+#else
u16 reserved_16;
u16 pkt_len;
u32 reserve1[3];
u32 pkt_err;
u32 reserve2[4];
+#endif
};
struct hip04_priv {
void __iomem *base;
+#if defined(CONFIG_HI13X1_GMAC)
+ void __iomem *sysctrl_base;
+#endif
int phy_mode;
int chan;
unsigned int port;
+ unsigned int group;
unsigned int speed;
unsigned int duplex;
unsigned int reg_inten;
@@ -221,6 +288,13 @@ static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}
+static void hip04_reset_dreq(struct hip04_priv *priv)
+{
+#if defined(CONFIG_HI13X1_GMAC)
+ writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
+#endif
+}
+
static void hip04_reset_ppe(struct hip04_priv *priv)
{
u32 val, tmp, timeout = 0;
@@ -241,14 +315,14 @@ static void hip04_config_fifo(struct hip04_priv *priv)
val |= PPE_CFG_STS_RX_PKT_CNT_RC;
writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
- val = BIT(priv->port);
+ val = BIT(priv->group);
regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
- val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
+ val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
val |= PPE_CFG_QOS_VMID_MODE;
writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
- val = RX_BUF_SIZE;
+ val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
@@ -285,8 +359,10 @@ static void hip04_config_fifo(struct hip04_priv *priv)
val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
+#ifndef CONFIG_HI13X1_GMAC
val = GE_AUTO_NEG_CTL;
writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
+#endif
}
static void hip04_mac_enable(struct net_device *ndev)
@@ -329,12 +405,18 @@ static void hip04_mac_disable(struct net_device *ndev)
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
- writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
+ u32 val;
+
+ val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
+ writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
}
static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
- regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
+ u32 val;
+
+ val = phys >> PPE_BUF_SIZE_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
}
static u32 hip04_recv_cnt(struct hip04_priv *priv)
@@ -442,11 +524,20 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->tx_skb[tx_head] = skb;
priv->tx_phys[tx_head] = phys;
- desc->send_addr = cpu_to_be32(phys);
- desc->send_size = cpu_to_be32(skb->len);
- desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+
+ desc->send_size = (__force u32)cpu_to_be32(skb->len);
+#if defined(CONFIG_HI13X1_GMAC)
+ desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
+ | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
+ desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
+ desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
+#else
+ desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+ desc->send_addr = (__force u32)cpu_to_be32(phys);
+#endif
phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
- desc->wb_addr = cpu_to_be32(phys);
+ desc->wb_addr = (__force u32)cpu_to_be32(phys +
+ offsetof(struct tx_desc, send_addr));
skb_tx_timestamp(skb);
hip04_set_xmit_desc(priv, phys);
@@ -507,8 +598,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
priv->rx_phys[priv->rx_head] = 0;
desc = (struct rx_desc *)skb->data;
- len = be16_to_cpu(desc->pkt_len);
- err = be32_to_cpu(desc->pkt_err);
+ len = be16_to_cpu((__force __be16)desc->pkt_len);
+ err = be32_to_cpu((__force __be32)desc->pkt_err);
if (0 == len) {
dev_kfree_skb_any(skb);
@@ -828,7 +919,16 @@ static int hip04_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
+#if defined(CONFIG_HI13X1_GMAC)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->sysctrl_base = devm_ioremap_resource(d, res);
+ if (IS_ERR(priv->sysctrl_base)) {
+ ret = PTR_ERR(priv->sysctrl_base);
+ goto init_fail;
+ }
+#endif
+
+ ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
if (ret < 0) {
dev_warn(d, "no port-handle\n");
goto init_fail;
@@ -836,6 +936,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->port = arg.args[0];
priv->chan = arg.args[1] * RX_DESC_NUM;
+ priv->group = arg.args[2];
hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -896,6 +997,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
ndev->irq = irq;
netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+ hip04_reset_dreq(priv);
hip04_reset_ppe(priv);
if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index fe879c07ae3c..2235dd55fab2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2370,6 +2370,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
ndev->max_mtu = MAC_MAX_MTU_V2 -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 83e19c6b974e..8ad5292eebbe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -69,7 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE 16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index fa8b8506b120..908d4f45c06a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -16,21 +16,18 @@ static LIST_HEAD(hnae3_ae_dev_list);
*/
static DEFINE_MUTEX(hnae3_common_lock);
-static bool hnae3_client_match(enum hnae3_client_type client_type,
- enum hnae3_dev_type dev_type)
+static bool hnae3_client_match(enum hnae3_client_type client_type)
{
- if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC ||
- client_type == HNAE3_CLIENT_ROCE))
- return true;
-
- if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC)
+ if (client_type == HNAE3_CLIENT_KNIC ||
+ client_type == HNAE3_CLIENT_ROCE)
return true;
return false;
}
void hnae3_set_client_init_flag(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev, int inited)
+ struct hnae3_ae_dev *ae_dev,
+ unsigned int inited)
{
if (!client || !ae_dev)
return;
@@ -39,9 +36,6 @@ void hnae3_set_client_init_flag(struct hnae3_client *client,
case HNAE3_CLIENT_KNIC:
hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break;
- case HNAE3_CLIENT_UNIC:
- hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
- break;
case HNAE3_CLIENT_ROCE:
hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break;
@@ -61,10 +55,6 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
inited = hnae3_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B);
break;
- case HNAE3_CLIENT_UNIC:
- inited = hnae3_get_bit(ae_dev->flag,
- HNAE3_UNIC_CLIENT_INITED_B);
- break;
case HNAE3_CLIENT_ROCE:
inited = hnae3_get_bit(ae_dev->flag,
HNAE3_ROCE_CLIENT_INITED_B);
@@ -82,7 +72,7 @@ static int hnae3_init_client_instance(struct hnae3_client *client,
int ret;
/* check if this client matches the type of ae_dev */
- if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
+ if (!(hnae3_client_match(client->type) &&
hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
@@ -99,7 +89,7 @@ static void hnae3_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
/* check if this client matches the type of ae_dev */
- if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
+ if (!(hnae3_client_match(client->type) &&
hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
return;
@@ -251,6 +241,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
}
list_del(&ae_algo->node);
@@ -351,6 +342,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
}
list_del(&ae_dev->node);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index ad21b0ef1946..48c7b70fc2c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -102,15 +102,9 @@ enum hnae3_loop {
enum hnae3_client_type {
HNAE3_CLIENT_KNIC,
- HNAE3_CLIENT_UNIC,
HNAE3_CLIENT_ROCE,
};
-enum hnae3_dev_type {
- HNAE3_DEV_KNIC,
- HNAE3_DEV_UNIC,
-};
-
/* mac media type */
enum hnae3_media_type {
HNAE3_MEDIA_TYPE_UNKNOWN,
@@ -154,7 +148,6 @@ enum hnae3_reset_type {
HNAE3_VF_FULL_RESET,
HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
- HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
HNAE3_UNKNOWN_RESET,
@@ -220,8 +213,7 @@ struct hnae3_ae_dev {
const struct hnae3_ae_ops *ops;
struct list_head node;
u32 flag;
- u8 override_pci_need_reset; /* fix to stop multiple reset happening */
- enum hnae3_dev_type dev_type;
+ unsigned long hw_err_reset_req;
enum hnae3_reset_type reset_type;
void *priv;
};
@@ -271,6 +263,8 @@ struct hnae3_ae_dev {
* get auto autonegotiation of pause frame use
* restart_autoneg()
* restart autonegotiation
+ * halt_autoneg()
+ * halt/resume autonegotiation when autonegotiation is on
* get_coalesce_usecs()
* get usecs to delay a TX interrupt after a packet is sent
* get_rx_max_coalesced_frames()
@@ -339,10 +333,14 @@ struct hnae3_ae_dev {
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
+ * restore_vlan_table()
+ * Restore vlan filter entries after reset
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
* set_gro_en
* Enable/disable HW GRO
+ * add_arfs_entry
+ * Check the 5-tuple of the flow, and create a flow director rule
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -386,6 +384,7 @@ struct hnae3_ae_ops {
int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
int (*get_autoneg)(struct hnae3_handle *handle);
int (*restart_autoneg)(struct hnae3_handle *handle);
+ int (*halt_autoneg)(struct hnae3_handle *handle, bool halt);
void (*get_coalesce_usecs)(struct hnae3_handle *handle,
u32 *tx_usecs, u32 *rx_usecs);
@@ -463,6 +462,8 @@ struct hnae3_ae_ops {
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+ enum hnae3_reset_type (*get_reset_level)(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr);
void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type);
void (*get_channels)(struct hnae3_handle *handle,
@@ -492,7 +493,9 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
- int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
+ int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys);
+ int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -502,6 +505,7 @@ struct hnae3_ae_ops {
void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
+ void (*restore_vlan_table)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
@@ -643,5 +647,6 @@ void hnae3_unregister_client(struct hnae3_client *client);
int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev, int inited);
+ struct hnae3_ae_dev *ae_dev,
+ unsigned int inited);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index b6fabbbdfd5b..d2ec4c573bf8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -4,8 +4,7 @@
#include "hnae3.h"
#include "hns3_enet.h"
-static
-int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP;
}
-static
-int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP;
}
-static
-int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP;
}
-static
-int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index fc4917ac44be..a4b937286f55 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -11,7 +11,8 @@
static struct dentry *hns3_dbgfs_root;
-static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf)
+static int hns3_dbg_queue_info(struct hnae3_handle *h,
+ const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;
@@ -155,7 +156,7 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
return 0;
}
-static int hns3_dbg_bd_info(struct hnae3_handle *h, char *cmd_buf)
+static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;
@@ -252,6 +253,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump qos buf cfg\n");
dev_info(&h->pdev->dev, "dump mng tbl\n");
dev_info(&h->pdev->dev, "dump reset info\n");
+ dev_info(&h->pdev->dev, "dump m7 info\n");
dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
dev_info(&h->pdev->dev, "dump mac tnl status\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index f326805543a4..310afa708831 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4,6 +4,9 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -14,6 +17,7 @@
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
+#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
@@ -24,8 +28,7 @@
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
-static void hns3_clear_all_ring(struct hnae3_handle *h);
-static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
static void hns3_remove_hw_addr(struct net_device *netdev);
static const char hns3_driver_name[] = "hns3";
@@ -79,23 +82,6 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
return IRQ_HANDLED;
}
-/* This callback function is used to set affinity changes to the irq affinity
- * masks when the irq_set_affinity_notifier function is used.
- */
-static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct hns3_enet_tqp_vector *tqp_vectors =
- container_of(notify, struct hns3_enet_tqp_vector,
- affinity_notify);
-
- tqp_vectors->affinity_mask = *mask;
-}
-
-static void hns3_nic_irq_affinity_release(struct kref *ref)
-{
-}
-
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
struct hns3_enet_tqp_vector *tqp_vectors;
@@ -107,8 +93,7 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
continue;
- /* clear the affinity notifier and affinity mask */
- irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
+ /* clear the affinity mask */
irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
/* release the irq resource */
@@ -153,20 +138,14 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
- tqp_vectors->name,
- tqp_vectors);
+ tqp_vectors->name, tqp_vectors);
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
tqp_vectors->vector_irq);
+ hns3_nic_uninit_irq(priv);
return ret;
}
- tqp_vectors->affinity_notify.notify =
- hns3_nic_irq_affinity_notify;
- tqp_vectors->affinity_notify.release =
- hns3_nic_irq_affinity_release;
- irq_set_affinity_notifier(tqp_vectors->vector_irq,
- &tqp_vectors->affinity_notify);
irq_set_affinity_hint(tqp_vectors->vector_irq,
&tqp_vectors->affinity_mask);
@@ -297,8 +276,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) {
netdev_err(netdev,
- "netif_set_real_num_tx_queues fail, ret=%d!\n",
- ret);
+ "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
return ret;
}
@@ -340,6 +318,40 @@ static void hns3_tqp_disable(struct hnae3_queue *tqp)
hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}
+static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+#endif
+}
+
+static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int i, ret;
+
+ if (!netdev->rx_cpu_rmap) {
+ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
+ if (!netdev->rx_cpu_rmap)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
+ tqp_vector->vector_irq);
+ if (ret) {
+ hns3_free_rx_cpu_rmap(netdev);
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -351,11 +363,16 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
return ret;
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
+
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
- netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
- return ret;
+ netdev_err(netdev, "init irq failed! ret=%d\n", ret);
+ goto free_rmap;
}
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
@@ -384,7 +401,8 @@ out_start_err:
hns3_vector_disable(&priv->tqp_vector[j]);
hns3_nic_uninit_irq(priv);
-
+free_rmap:
+ hns3_free_rx_cpu_rmap(netdev);
return ret;
}
@@ -429,16 +447,13 @@ static int hns3_nic_net_open(struct net_device *netdev)
ret = hns3_nic_net_up(netdev);
if (ret) {
- netdev_err(netdev,
- "hns net up fail, ret=%d!\n", ret);
+ netdev_err(netdev, "net up fail, ret=%d!\n", ret);
return ret;
}
kinfo = &h->kinfo;
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- netdev_set_prio_tc_map(netdev, i,
- kinfo->prio_tc[i]);
- }
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
@@ -447,6 +462,20 @@ static int hns3_nic_net_open(struct net_device *netdev)
return 0;
}
+static void hns3_reset_tx_queue(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct netdev_queue *dev_queue;
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ dev_queue = netdev_get_tx_queue(ndev,
+ priv->ring_data[i].queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ }
+}
+
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -467,10 +496,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (ops->stop)
ops->stop(priv->ae_handle);
+ hns3_free_rx_cpu_rmap(netdev);
+
/* free irq resources */
hns3_nic_uninit_irq(priv);
- hns3_clear_all_ring(priv->ae_handle);
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during the reset process, because the driver may not be able
+ * to disable the ring through firmware when downing the netdev.
+ */
+ if (!hns3_nic_resetting(netdev))
+ hns3_clear_all_ring(priv->ae_handle, false);
+
+ hns3_reset_tx_queue(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
@@ -641,7 +679,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
if (l3.v4->version == 4)
l3.v4->check = 0;
- /* tunnel packet.*/
+ /* tunnel packet */
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_UDP_TUNNEL |
@@ -666,11 +704,11 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
l3.v4->check = 0;
}
- /* normal or tunnel packet*/
+ /* normal or tunnel packet */
l4_offset = l4.hdr - skb->data;
hdr_len = (l4.tcp->doff << 2) + l4_offset;
- /* remove payload length from inner pseudo checksum when tso*/
+ /* remove payload length from inner pseudo checksum when tso */
l4_paylen = skb->len - l4_offset;
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(l4_paylen));
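
csum_replace_by_diff() here folds the payload length back out of the TCP pseudo-header checksum: with TSO the hardware re-adds each segment's own length, so the seed must not contain it. A standalone sketch of the underlying ones'-complement subtraction (csum_remove() is a hypothetical helper, not a kernel API):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* remove 'val' from a ones'-complement checksum: add its complement */
static uint16_t csum_remove(uint16_t check, uint32_t val)
{
	uint32_t sum = (uint16_t)~check;	/* unfold to the raw sum */
	uint32_t inv = ~val;

	sum += (inv >> 16) + (inv & 0xffff);
	return (uint16_t)~csum_fold32(sum);
}

int main(void)
{
	/* e.g. drop a 1400-byte payload length from a pseudo checksum */
	printf("0x%04x\n", csum_remove(0x1234, 1400));
	return 0;
}
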
@@ -778,7 +816,7 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
il2_hdr = skb_inner_mac_header(skb);
- /* compute OL4 header size, defined in 4 Bytes. */
+ /* compute OL4 header size, defined in 4 Bytes */
l4_len = il2_hdr - l4.hdr;
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
@@ -913,8 +951,9 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
/* Config bd buffer end */
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+ if (!!frag_end)
+ hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, 1U);
+ hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -988,7 +1027,8 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
- int size, int frag_end, enum hns_desc_type type)
+ unsigned int size, int frag_end,
+ enum hns_desc_type type)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
@@ -1038,8 +1078,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len =
- cpu_to_le32(type_cs_vlan_tso);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
@@ -1086,19 +1125,19 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
- DESC_TYPE_SKB : DESC_TYPE_PAGE;
+ DESC_TYPE_SKB : DESC_TYPE_PAGE;
/* now, fill the descriptor */
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
- (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+ (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
frag_end && (k == frag_buf_num - 1) ?
1 : 0);
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
- /* move ring pointer to next.*/
+ /* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -1452,12 +1491,10 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
- rx_drop += ring->stats.non_vld_descs;
rx_drop += ring->stats.l2_err;
- rx_errors += ring->stats.non_vld_descs;
rx_errors += ring->stats.l2_err;
+ rx_errors += ring->stats.l3l4_csum_err;
rx_crc_errors += ring->stats.l2_err;
- rx_crc_errors += ring->stats.l3l4_csum_err;
rx_multicast += ring->stats.rx_multicast;
rx_length_errors += ring->stats.err_pkt_len;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
@@ -1493,12 +1530,12 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
- struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_knic_private_info *kinfo = &h->kinfo;
u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
+ struct hnae3_knic_private_info *kinfo;
u8 tc = mqprio_qopt->qopt.num_tc;
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
+ struct hnae3_handle *h;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
@@ -1510,6 +1547,9 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (!netdev)
return -EINVAL;
+ h = hns3_get_handle(netdev);
+ kinfo = &h->kinfo;
+
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
}
@@ -1527,15 +1567,11 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
- if (!ret)
- set_bit(vid, priv->active_vlans);
-
return ret;
}
@@ -1543,33 +1579,11 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
- if (!ret)
- clear_bit(vid, priv->active_vlans);
-
- return ret;
-}
-
-static int hns3_restore_vlan(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret = 0;
- u16 vid;
-
- for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
- ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
- if (ret) {
- netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
- vid, ret);
- return ret;
- }
- }
-
return ret;
}
@@ -1581,7 +1595,7 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
- qos, vlan_proto);
+ qos, vlan_proto);
return ret;
}
@@ -1722,6 +1736,32 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
h->ae_algo->ops->reset_event(h->pdev, h);
}
+#ifdef CONFIG_RFS_ACCEL
+static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct hnae3_handle *h = hns3_get_handle(dev);
+ struct flow_keys fkeys;
+
+ if (!h->ae_algo->ops->add_arfs_entry)
+ return -EOPNOTSUPP;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
+ return -EPROTONOSUPPORT;
+
+ if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
+ fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
+ (fkeys.basic.ip_proto != IPPROTO_TCP &&
+ fkeys.basic.ip_proto != IPPROTO_UDP))
+ return -EPROTONOSUPPORT;
+
+ return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
+}
+#endif
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -1737,6 +1777,10 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = hns3_rx_flow_steer,
+#endif
+
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -1802,8 +1846,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct hnae3_ae_dev *ae_dev;
int ret;
- ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
- GFP_KERNEL);
+ ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
if (!ae_dev) {
ret = -ENOMEM;
return ret;
@@ -1811,7 +1854,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
- ae_dev->dev_type = HNAE3_DEV_KNIC;
ae_dev->reset_type = HNAE3_NONE_RESET;
hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
@@ -1895,9 +1937,9 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- if (!ae_dev) {
+ if (!ae_dev || !ae_dev->ops) {
dev_err(&pdev->dev,
- "Can't recover - error happened during device init\n");
+ "Can't recover - error happened before device initialized\n");
return PCI_ERS_RESULT_NONE;
}
@@ -1912,14 +1954,23 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops;
+ enum hnae3_reset_type reset_type;
struct device *dev = &pdev->dev;
- dev_info(dev, "requesting reset due to PCI error\n");
+ if (!ae_dev || !ae_dev->ops)
+ return PCI_ERS_RESULT_NONE;
+ ops = ae_dev->ops;
/* request the reset */
- if (ae_dev->ops->reset_event) {
- if (!ae_dev->override_pci_need_reset)
- ae_dev->ops->reset_event(pdev, NULL);
+ if (ops->reset_event) {
+ if (ae_dev->hw_err_reset_req) {
+ reset_type = ops->get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ ops->set_default_reset_request(ae_dev, reset_type);
+ dev_info(dev, "requesting reset due to PCI error\n");
+ ops->reset_event(pdev, NULL);
+ }
return PCI_ERS_RESULT_RECOVERED;
}
@@ -2168,7 +2219,7 @@ out_buffer_fail:
return ret;
}
-/* detach a in-used buffer and replace with a reserved one */
+/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
struct hns3_desc_cb *res_cb)
{
@@ -2181,8 +2232,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
- + ring->desc_cb[i].page_offset);
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
}
@@ -2284,8 +2335,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
-static void
-hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
+static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+ int cleand_count)
{
struct hns3_desc_cb *desc_cb;
struct hns3_desc_cb res_cbs;
@@ -2338,7 +2389,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Avoid re-using remote pages, or the stack is still using the page
* when page_offset rollback to zero, flag default unreuse
*/
- if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
+ if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
(!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
return;
@@ -2347,7 +2398,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
desc_cb->reuse_flag = 1;
- /* Bump ref count on page before it is given*/
+ /* Bump ref count on page before it is given */
get_page(desc_cb->priv);
} else if (page_count(desc_cb->priv) == 1) {
desc_cb->reuse_flag = 1;
@@ -2356,13 +2407,13 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
}
}
-static int hns3_gro_complete(struct sk_buff *skb)
+static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
int depth = 0;
- while (type == htons(ETH_P_8021Q)) {
+ while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
if ((depth + VLAN_HLEN) > skb_headlen(skb))
@@ -2373,10 +2424,24 @@ static int hns3_gro_complete(struct sk_buff *skb)
depth += VLAN_HLEN;
}
+ skb_set_network_header(skb, depth);
+
if (type == htons(ETH_P_IP)) {
+ const struct iphdr *iph = ip_hdr(skb);
+
depth += sizeof(struct iphdr);
+ skb_set_transport_header(skb, depth);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
+ iph->daddr, 0);
} else if (type == htons(ETH_P_IPV6)) {
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+
depth += sizeof(struct ipv6hdr);
+ skb_set_transport_header(skb, depth);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
+ &iph->daddr, 0);
} else {
netdev_err(skb->dev,
"Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
@@ -2384,13 +2449,16 @@ static int hns3_gro_complete(struct sk_buff *skb)
return -EFAULT;
}
- th = (struct tcphdr *)(skb->data + depth);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ skb->csum_start = (unsigned char *)th - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
return 0;
}
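
The switch from CHECKSUM_UNNECESSARY to CHECKSUM_PARTIAL matters because the stack then expects th->check to hold only the uncomplemented pseudo-header sum, completing it from csum_start/csum_offset later; that is what the ~tcp_v4_check()/~tcp_v6_check() seeds above provide. A standalone sketch of the IPv4 pseudo-header sum (byte-order handling glossed over for brevity):

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t s)
{
	while (s >> 16)
		s = (s & 0xffff) + (s >> 16);
	return (uint16_t)s;
}

/* IPv4 TCP pseudo header: saddr, daddr, protocol and TCP length */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr, uint16_t len)
{
	uint32_t s = 0;

	s += saddr >> 16; s += saddr & 0xffff;
	s += daddr >> 16; s += daddr & 0xffff;
	s += 6;			/* IPPROTO_TCP */
	s += len;
	return fold(s);
}

int main(void)
{
	/* seed a driver would leave in th->check under CHECKSUM_PARTIAL;
	 * hardware/stack folds in the payload and complements the result
	 */
	printf("th->check seed: 0x%04x\n",
	       pseudo_hdr_sum(0xc0a80001, 0xc0a80002, 1460));
	return 0;
}
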
@@ -2508,7 +2576,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
-static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
unsigned char *va)
{
#define HNS3_NEED_ADD_FRAG 1
@@ -2537,7 +2605,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
/* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
put_page(desc_cb->priv);
@@ -2574,7 +2642,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
*/
if (pending) {
pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
- ring->desc_num;
+ ring->desc_num;
pre_desc = &ring->desc[pre_bd];
bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
} else {
@@ -2628,21 +2696,22 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 l234info,
u32 bd_base_info, u32 ol_info)
{
- u16 gro_count;
u32 l3_type;
- gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
- HNS3_RXD_GRO_COUNT_S);
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
/* if there is no HW GRO, do not set gro params */
- if (!gro_count) {
+ if (!skb_shinfo(skb)->gso_size) {
hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
return 0;
}
- NAPI_GRO_CB(skb)->count = gro_count;
+ NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
+ HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
- l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
- HNS3_RXD_L3ID_S);
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
if (l3_type == HNS3_L3_TYPE_IPV4)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
else if (l3_type == HNS3_L3_TYPE_IPV6)
@@ -2650,11 +2719,7 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
else
return -EFAULT;
- skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
- HNS3_RXD_GRO_SIZE_M,
- HNS3_RXD_GRO_SIZE_S);
-
- return hns3_gro_complete(skb);
+ return hns3_gro_complete(skb, l234info);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
@@ -2703,14 +2768,6 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
vlan_tag);
}
- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_vld_descs++;
- u64_stats_update_end(&ring->syncp);
-
- return -EINVAL;
- }
-
if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
BIT(HNS3_RXD_L2E_B))))) {
u64_stats_update_begin(&ring->syncp);
@@ -2762,8 +2819,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
+ unsigned int length;
u32 bd_base_info;
- int length;
int ret;
desc = &ring->desc[ring->next_to_clean];
@@ -2828,14 +2885,14 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return ret;
}
+ skb_record_rx_queue(skb, ring->tqp->tqp_index);
*out_skb = skb;
return 0;
}
-int hns3_clean_rx_ring(
- struct hns3_enet_ring *ring, int budget,
- void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
+int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int recv_pkts, recv_bds, clean_count, err;
@@ -2887,42 +2944,25 @@ int hns3_clean_rx_ring(
out:
/* Make all data has been write before submit */
if (clean_count + unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring,
- clean_count + unused_count);
+ hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
return recv_pkts;
}
-static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
+static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
{
- struct hns3_enet_tqp_vector *tqp_vector =
- ring_group->ring->tqp_vector;
+#define HNS3_RX_LOW_BYTE_RATE 10000
+#define HNS3_RX_MID_BYTE_RATE 20000
+#define HNS3_RX_ULTRA_PACKET_RATE 40
+
enum hns3_flow_level_range new_flow_level;
- int packets_per_msecs;
- int bytes_per_msecs;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int packets_per_msecs, bytes_per_msecs;
u32 time_passed_ms;
- u16 new_int_gl;
-
- if (!tqp_vector->last_jiffies)
- return false;
-
- if (ring_group->total_packets == 0) {
- ring_group->coal.int_gl = HNS3_INT_GL_50K;
- ring_group->coal.flow_level = HNS3_FLOW_LOW;
- return true;
- }
- /* Simple throttlerate management
- * 0-10MB/s lower (50000 ints/s)
- * 10-20MB/s middle (20000 ints/s)
- * 20-1249MB/s high (18000 ints/s)
- * > 40000pps ultra (8000 ints/s)
- */
- new_flow_level = ring_group->coal.flow_level;
- new_int_gl = ring_group->coal.int_gl;
+ tqp_vector = ring_group->ring->tqp_vector;
time_passed_ms =
jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
-
if (!time_passed_ms)
return false;
@@ -2932,9 +2972,14 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
do_div(ring_group->total_bytes, time_passed_ms);
bytes_per_msecs = ring_group->total_bytes;
-#define HNS3_RX_LOW_BYTE_RATE 10000
-#define HNS3_RX_MID_BYTE_RATE 20000
+ new_flow_level = ring_group->coal.flow_level;
+ /* Simple throttle rate management
+ * 0-10MB/s lower (50000 ints/s)
+ * 10-20MB/s middle (20000 ints/s)
+ * 20-1249MB/s high (18000 ints/s)
+ * > 40000pps ultra (8000 ints/s)
+ */
switch (new_flow_level) {
case HNS3_FLOW_LOW:
if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
@@ -2954,13 +2999,40 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
break;
}
-#define HNS3_RX_ULTRA_PACKET_RATE 40
-
if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
&tqp_vector->rx_group == ring_group)
new_flow_level = HNS3_FLOW_ULTRA;
- switch (new_flow_level) {
+ ring_group->total_bytes = 0;
+ ring_group->total_packets = 0;
+ ring_group->coal.flow_level = new_flow_level;
+
+ return true;
+}
+
+static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
+{
+ struct hns3_enet_tqp_vector *tqp_vector;
+ u16 new_int_gl;
+
+ if (!ring_group->ring)
+ return false;
+
+ tqp_vector = ring_group->ring->tqp_vector;
+ if (!tqp_vector->last_jiffies)
+ return false;
+
+ if (ring_group->total_packets == 0) {
+ ring_group->coal.int_gl = HNS3_INT_GL_50K;
+ ring_group->coal.flow_level = HNS3_FLOW_LOW;
+ return true;
+ }
+
+ if (!hns3_get_new_flow_lvl(ring_group))
+ return false;
+
+ new_int_gl = ring_group->coal.int_gl;
+ switch (ring_group->coal.flow_level) {
case HNS3_FLOW_LOW:
new_int_gl = HNS3_INT_GL_50K;
break;
@@ -2977,9 +3049,6 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
break;
}
- ring_group->total_bytes = 0;
- ring_group->total_packets = 0;
- ring_group->coal.flow_level = new_flow_level;
if (new_int_gl != ring_group->coal.int_gl) {
ring_group->coal.int_gl = new_int_gl;
return true;
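
hns3_get_new_flow_lvl() measures bytes and packets per millisecond since the last poll and walks the flow level with hysteresis; hns3_get_new_int_gl() then maps the level to a GL value per the rate table in the comment above. A simplified, stateless rendering of that mapping (thresholds from the patch; the one-step hysteresis and the rx-group-only ultra check are omitted):

#include <stdio.h>

#define RX_LOW_BYTE_RATE	10000	/* bytes per ms == 10 MB/s */
#define RX_MID_BYTE_RATE	20000	/* 20 MB/s */
#define RX_ULTRA_PACKET_RATE	40	/* packets per ms == 40000 pps */

static int pick_ints_per_sec(int bytes_per_ms, int pkts_per_ms)
{
	if (pkts_per_ms > RX_ULTRA_PACKET_RATE)
		return 8000;		/* HNS3_FLOW_ULTRA */
	if (bytes_per_ms <= RX_LOW_BYTE_RATE)
		return 50000;		/* HNS3_FLOW_LOW */
	if (bytes_per_ms <= RX_MID_BYTE_RATE)
		return 20000;		/* HNS3_FLOW_MID */
	return 18000;			/* HNS3_FLOW_HIGH */
}

int main(void)
{
	printf("15 MB/s, 20 pkt/ms -> %d ints/s\n",
	       pick_ints_per_sec(15000, 20));
	printf("30 MB/s, 50 pkt/ms -> %d ints/s\n",
	       pick_ints_per_sec(30000, 50));
	return 0;
}
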
@@ -3280,6 +3349,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
if (!vector)
return -ENOMEM;
+ /* save the actual available vector number */
vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
priv->vector_num = vector_num;
@@ -3331,8 +3401,6 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
- irq_set_affinity_notifier(tqp_vector->vector_irq,
- NULL);
irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
free_irq(tqp_vector->vector_irq, tqp_vector);
tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -3364,7 +3432,7 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
}
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
- int ring_type)
+ unsigned int ring_type)
{
struct hns3_nic_ring_data *ring_data = priv->ring_data;
int queue_num = priv->ae_handle->kinfo.num_tqps;
@@ -3550,8 +3618,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
struct hnae3_queue *q = ring->tqp;
if (!HNAE3_IS_TX_RING(ring)) {
- hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
- (u32)dma);
+ hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
@@ -3851,6 +3918,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_client_stop(handle);
+ hns3_uninit_phy(netdev);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
goto out_netdev_free;
@@ -3858,9 +3927,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_del_all_fd_rules(netdev, true);
- hns3_force_clear_all_rx_ring(handle);
-
- hns3_uninit_phy(netdev);
+ hns3_clear_all_ring(handle, true);
hns3_nic_uninit_vector_data(priv);
@@ -3997,8 +4064,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
ret);
return ret;
}
- hns3_replace_buffer(ring, ring->next_to_use,
- &res_cbs);
+ hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
}
ring_ptr_move_fw(ring, next_to_use);
}
@@ -4030,40 +4096,26 @@ static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
}
}
-static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
{
struct net_device *ndev = h->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hns3_enet_ring *ring;
u32 i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
- hns3_force_clear_rx_ring(ring);
- }
-}
-
-static void hns3_clear_all_ring(struct hnae3_handle *h)
-{
- struct net_device *ndev = h->kinfo.netdev;
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- u32 i;
-
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- struct netdev_queue *dev_queue;
struct hns3_enet_ring *ring;
ring = priv->ring_data[i].ring;
hns3_clear_tx_ring(ring);
- dev_queue = netdev_get_tx_queue(ndev,
- priv->ring_data[i].queue_index);
- netdev_tx_reset_queue(dev_queue);
ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
/* Continue to clear other rings even if clearing some
* rings failed.
*/
- hns3_clear_rx_ring(ring);
+ if (force)
+ hns3_force_clear_rx_ring(ring);
+ else
+ hns3_clear_rx_ring(ring);
}
}
@@ -4173,7 +4225,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
if (ret) {
set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
netdev_err(kinfo->netdev,
- "hns net up fail, ret=%d!\n", ret);
+ "net up fail, ret=%d!\n", ret);
return ret;
}
}
@@ -4251,12 +4303,8 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(netdev, vlan_filter_enable);
- /* Hardware table is only clear when pf resets */
- if (!(handle->flags & HNAE3_SUPPORT_VF)) {
- ret = hns3_restore_vlan(netdev);
- if (ret)
- return ret;
- }
+ if (handle->ae_algo->ops->restore_vlan_table)
+ handle->ae_algo->ops->restore_vlan_table(handle);
return hns3_restore_fd_rules(netdev);
}
@@ -4272,7 +4320,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
- hns3_force_clear_all_rx_ring(handle);
+ hns3_clear_all_ring(handle, true);
+ hns3_reset_tx_queue(priv->ae_handle);
hns3_nic_uninit_vector_data(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index c14480f9b625..848b866761df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -145,7 +145,7 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
-#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
+#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
@@ -384,7 +384,6 @@ struct ring_stats {
u64 rx_err_cnt;
u64 reuse_pg_cnt;
u64 err_pkt_len;
- u64 non_vld_descs;
u64 err_bd_num;
u64 l2_err;
u64 l3l4_csum_err;
@@ -417,7 +416,7 @@ struct hns3_enet_ring {
*/
int next_to_clean;
- int pull_len; /* head length for current packet */
+ u32 pull_len; /* head length for current packet */
u32 frag_num;
unsigned char *va; /* first buffer address for current packet */
@@ -446,25 +445,6 @@ enum hns3_flow_level_range {
HNS3_FLOW_ULTRA = 3,
};
-enum hns3_link_mode_bits {
- HNS3_LM_FIBRE_BIT = BIT(0),
- HNS3_LM_AUTONEG_BIT = BIT(1),
- HNS3_LM_TP_BIT = BIT(2),
- HNS3_LM_PAUSE_BIT = BIT(3),
- HNS3_LM_BACKPLANE_BIT = BIT(4),
- HNS3_LM_10BASET_HALF_BIT = BIT(5),
- HNS3_LM_10BASET_FULL_BIT = BIT(6),
- HNS3_LM_100BASET_HALF_BIT = BIT(7),
- HNS3_LM_100BASET_FULL_BIT = BIT(8),
- HNS3_LM_1000BASET_FULL_BIT = BIT(9),
- HNS3_LM_10000BASEKR_FULL_BIT = BIT(10),
- HNS3_LM_25000BASEKR_FULL_BIT = BIT(11),
- HNS3_LM_40000BASELR4_FULL_BIT = BIT(12),
- HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13),
- HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14),
- HNS3_LM_COUNT = 15
-};
-
#define HNS3_INT_GL_MAX 0x1FE0
#define HNS3_INT_GL_50K 0x0014
#define HNS3_INT_GL_20K 0x0032
@@ -550,7 +530,6 @@ struct hns3_nic_priv {
struct notifier_block notifier_block;
/* Vxlan/Geneve information */
struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct hns3_enet_coalesce tx_coal;
struct hns3_enet_coalesce rx_coal;
};
@@ -631,7 +610,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hnae3_buf_size(_ring) ((_ring)->buf_size)
#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
+#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index d1588ea6132c..5bff98a9b0dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -44,7 +44,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("errors", rx_err_cnt),
HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
- HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
HNS3_TQP_STAT("err_bd_num", err_bd_num),
HNS3_TQP_STAT("l2_err", l2_err),
HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
@@ -60,6 +59,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
+#define HNS3_NIC_LB_SETUP_USEC 10000
/* Nic loopback test err */
#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
@@ -117,7 +117,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret;
ret = hns3_lp_setup(ndev, loop_mode, true);
- usleep_range(10000, 20000);
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
return ret;
}
@@ -132,7 +132,7 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret;
}
- usleep_range(10000, 20000);
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
return 0;
}
@@ -149,6 +149,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+
+ /* The dst mac addr of loopback packet is the same as the host's
+ * mac addr, the SSU component may loop back the packet to host
+ * before the packet reaches mac or serdes, which will defeat
+ * the purpose of mac or serdes selftest.
+ */
ethh->h_dest[5] += 0x1f;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
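As a sketch of the frame setup the comment above describes — a hypothetical userspace model, not driver code, with an illustrative port MAC — the destination address starts as a copy of the port MAC and its last octet is then perturbed so the SSU cannot short-circuit the frame back to the host:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
	const uint8_t port_mac[ETH_ALEN] = { 0x00, 0x18, 0x2d, 0x01, 0x02, 0x03 };
	uint8_t h_dest[ETH_ALEN];
	uint8_t h_source[ETH_ALEN];

	memcpy(h_dest, port_mac, ETH_ALEN);
	h_dest[5] += 0x1f;		/* no longer matches the host MAC */
	memset(h_source, 0, ETH_ALEN);	/* eth_zero_addr() equivalent */

	printf("dest %02x:%02x:%02x:%02x:%02x:%02x\n",
	       h_dest[0], h_dest[1], h_dest[2],
	       h_dest[3], h_dest[4], h_dest[5]);
	return 0;
}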
@@ -243,11 +249,13 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
skb_get(skb);
tx_ret = hns3_nic_net_xmit(skb, ndev);
- if (tx_ret == NETDEV_TX_OK)
+ if (tx_ret == NETDEV_TX_OK) {
good_cnt++;
- else
+ } else {
+ kfree_skb(skb);
netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
tx_ret);
+ }
}
if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
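The error path above pairs the skb_get() taken before each transmit with a kfree_skb() when hns3_nic_net_xmit() rejects the buffer, closing a reference leak. A userspace model of that refcount pattern (hypothetical buf type, not the kernel skb API):

#include <stdio.h>
#include <stdlib.h>

struct buf { int refcnt; };

static void buf_get(struct buf *b) { b->refcnt++; }

static void buf_put(struct buf *b)
{
	if (--b->refcnt == 0)
		free(b);
}

static int xmit(struct buf *b, int ok)
{
	if (!ok)
		return -1;	/* caller still owns its reference */
	buf_put(b);		/* reference consumed on success */
	return 0;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	b->refcnt = 1;
	buf_get(b);		/* extra reference for this attempt */
	if (xmit(b, 0) != 0)
		buf_put(b);	/* the fix: drop the unconsumed reference */

	printf("refcnt now %d\n", b->refcnt);	/* back to 1, no leak */
	buf_put(b);
	return 0;
}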
@@ -327,6 +335,13 @@ static void hns3_self_test(struct net_device *ndev,
h->ae_algo->ops->enable_vlan_filter(h, false);
#endif
+ /* Tell firmware to stop mac autoneg before the loopback test
+ * starts, otherwise the loopback test may fail when the port is
+ * still negotiating.
+ */
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, true);
+
set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
@@ -349,6 +364,9 @@ static void hns3_self_test(struct net_device *ndev,
clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, false);
+
#if IS_ENABLED(CONFIG_VLAN_8021Q)
if (dis_vlan_filter)
h->ae_algo->ops->enable_vlan_filter(h, true);
@@ -435,7 +453,7 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
buff = hns3_get_strings_tqps(h, buff);
- h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
+ ops->get_strings(h, stringset, (u8 *)buff);
break;
case ETH_SS_TEST:
ops->get_strings(h, stringset, data);
@@ -510,6 +528,11 @@ static void hns3_get_drvinfo(struct net_device *netdev,
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
+ if (!h->ae_algo->ops->get_fw_version) {
+ netdev_err(netdev, "could not get fw version!\n");
+ return;
+ }
+
strncpy(drvinfo->version, hns3_driver_version,
sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -530,7 +553,7 @@ static u32 hns3_get_link(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
+ if (h->ae_algo->ops->get_status)
return h->ae_algo->ops->get_status(h);
else
return 0;
@@ -560,7 +583,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
+ if (h->ae_algo->ops->get_pauseparam)
h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
&param->rx_pause, &param->tx_pause);
}
@@ -610,9 +633,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
u8 media_type;
u8 link_stat;
- if (!h->ae_algo || !h->ae_algo->ops)
- return -EOPNOTSUPP;
-
ops = h->ae_algo->ops;
if (ops->get_media_type)
ops->get_media_type(h, &media_type, &module_type);
@@ -740,8 +760,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops ||
- !h->ae_algo->ops->get_rss_key_size)
+ if (!h->ae_algo->ops->get_rss_key_size)
return 0;
return h->ae_algo->ops->get_rss_key_size(h);
@@ -751,8 +770,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops ||
- !h->ae_algo->ops->get_rss_indir_size)
+ if (!h->ae_algo->ops->get_rss_indir_size)
return 0;
return h->ae_algo->ops->get_rss_indir_size(h);
@@ -763,7 +781,7 @@ static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
+ if (!h->ae_algo->ops->get_rss)
return -EOPNOTSUPP;
return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
@@ -774,7 +792,7 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
+ if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
if ((h->pdev->revision == 0x20 &&
@@ -799,9 +817,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops)
- return -EOPNOTSUPP;
-
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps;
@@ -915,9 +930,6 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops)
- return -EOPNOTSUPP;
-
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
if (h->ae_algo->ops->set_rss_tuple)
@@ -1193,7 +1205,7 @@ static int hns3_set_phys_id(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
+ if (!h->ae_algo->ops->set_led_id)
return -EOPNOTSUPP;
return h->ae_algo->ops->set_led_id(h, state);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index fbd904e3077c..22f6acd45d9a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -110,8 +110,7 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
- (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
- HCLGE_NIC_CMQ_ENABLE);
+ ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
@@ -120,8 +119,7 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
- (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
- HCLGE_NIC_CMQ_ENABLE);
+ ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
@@ -175,7 +173,11 @@ static bool hclge_is_special_opcode(u16 opcode)
HCLGE_OPC_STATS_MAC,
HCLGE_OPC_STATS_MAC_ALL,
HCLGE_OPC_QUERY_32_BIT_REG,
- HCLGE_OPC_QUERY_64_BIT_REG};
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -186,12 +188,43 @@ static bool hclge_is_special_opcode(u16 opcode)
return false;
}
+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+ switch (desc_ret) {
+ case HCLGE_CMD_EXEC_SUCCESS:
+ return 0;
+ case HCLGE_CMD_NO_AUTH:
+ return -EPERM;
+ case HCLGE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HCLGE_CMD_QUEUE_FULL:
+ return -EXFULL;
+ case HCLGE_CMD_NEXT_ERR:
+ return -ENOSR;
+ case HCLGE_CMD_UNEXE_ERR:
+ return -ENOTBLK;
+ case HCLGE_CMD_PARA_ERR:
+ return -EINVAL;
+ case HCLGE_CMD_RESULT_ERR:
+ return -ERANGE;
+ case HCLGE_CMD_TIMEOUT:
+ return -ETIME;
+ case HCLGE_CMD_HILINK_ERR:
+ return -ENOLINK;
+ case HCLGE_CMD_QUEUE_ILLEGAL:
+ return -ENXIO;
+ case HCLGE_CMD_INVALID:
+ return -EBADR;
+ default:
+ return -EIO;
+ }
+}
+
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
int num, int ntc)
{
u16 opcode, desc_ret;
int handle;
- int retval;
opcode = le16_to_cpu(desc[0].opcode);
for (handle = 0; handle < num; handle++) {
@@ -205,17 +238,9 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
else
desc_ret = le16_to_cpu(desc[0].retval);
- if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
- retval = 0;
- else if (desc_ret == HCLGE_CMD_NO_AUTH)
- retval = -EPERM;
- else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
- retval = -EOPNOTSUPP;
- else
- retval = -EIO;
hw->cmq.last_status = desc_ret;
- return retval;
+ return hclge_cmd_convert_err_code(desc_ret);
}
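The new helper consolidates what was an if/else chain into a single translation of firmware completion codes onto negative errno values, so callers can use ordinary kernel error handling. A compilable userspace reduction, keeping only a few of the codes and the Linux errno mappings chosen in the diff:

#include <errno.h>
#include <stdio.h>

enum cmd_ret {
	CMD_SUCCESS = 0, CMD_NO_AUTH = 1, CMD_NOT_SUPPORTED = 2,
	CMD_QUEUE_FULL = 3, CMD_TIMEOUT = 8
};

static int convert_err_code(int desc_ret)
{
	switch (desc_ret) {
	case CMD_SUCCESS:	return 0;
	case CMD_NO_AUTH:	return -EPERM;
	case CMD_NOT_SUPPORTED:	return -EOPNOTSUPP;
	case CMD_QUEUE_FULL:	return -EXFULL;
	case CMD_TIMEOUT:	return -ETIME;
	default:		return -EIO;	/* unknown completion code */
	}
}

int main(void)
{
	printf("%d %d\n", convert_err_code(CMD_NO_AUTH),
	       convert_err_code(42));
	return 0;
}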
/**
@@ -230,6 +255,7 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+ struct hclge_cmq_ring *csq = &hw->cmq.csq;
struct hclge_desc *desc_to_use;
bool complete = false;
u32 timeout = 0;
@@ -239,8 +265,16 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclge_ring_space(&hw->cmq.csq) ||
- test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclge_ring_space(&hw->cmq.csq)) {
+ /* If the CMDQ ring is full, the SW HEAD and HW HEAD may differ,
+ * so the SW HEAD pointer csq->next_to_clean needs to be refreshed
+ * from the HW HEAD register.
+ */
+ csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
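The resync above only matters because free space is computed from the SW copies of HEAD and TAIL. A minimal model of that circular-ring bookkeeping, with the MMIO read stubbed out and made-up values:

#include <stdio.h>

struct cmq_ring {
	int desc_num;		/* ring size */
	int next_to_use;	/* SW tail */
	int next_to_clean;	/* SW head */
};

static int ring_space(const struct cmq_ring *r)
{
	int used = (r->next_to_use - r->next_to_clean + r->desc_num) %
		   r->desc_num;

	return r->desc_num - used - 1;
}

static int read_hw_head(void) { return 5; }	/* stand-in for the MMIO read */

int main(void)
{
	struct cmq_ring r = { .desc_num = 8, .next_to_use = 6,
			      .next_to_clean = 0 };

	if (2 > ring_space(&r)) {
		r.next_to_clean = read_hw_head();	/* resync stale SW head */
		printf("space after resync: %d\n", ring_space(&r));
	}
	return 0;
}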
@@ -278,7 +312,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
}
if (!complete) {
- retval = -EAGAIN;
+ retval = -EBADE;
} else {
retval = hclge_cmd_check_retval(hw, desc, num, ntc);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d79a209b80f6..96840d8f3e24 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -41,6 +41,14 @@ enum hclge_cmd_return_status {
HCLGE_CMD_NO_AUTH = 1,
HCLGE_CMD_NOT_SUPPORTED = 2,
HCLGE_CMD_QUEUE_FULL = 3,
+ HCLGE_CMD_NEXT_ERR = 4,
+ HCLGE_CMD_UNEXE_ERR = 5,
+ HCLGE_CMD_PARA_ERR = 6,
+ HCLGE_CMD_RESULT_ERR = 7,
+ HCLGE_CMD_TIMEOUT = 8,
+ HCLGE_CMD_HILINK_ERR = 9,
+ HCLGE_CMD_QUEUE_ILLEGAL = 10,
+ HCLGE_CMD_INVALID = 11,
};
enum hclge_cmd_status {
@@ -180,6 +188,9 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
+ /* PPU commands */
+ HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
+
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
@@ -243,6 +254,9 @@ enum hclge_opcode_type {
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+ /* M7 stats command */
+ HCLGE_OPC_M7_STATS_BD = 0x7012,
+ HCLGE_OPC_M7_STATS_INFO = 0x7013,
/* SFP command */
HCLGE_OPC_GET_SFP_INFO = 0x7104,
@@ -265,6 +279,8 @@ enum hclge_opcode_type {
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
+ HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
HCLGE_IGU_COMMON_INT_EN = 0x1806,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
@@ -641,6 +657,11 @@ enum hclge_mac_vlan_tbl_opcode {
HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */
};
+enum hclge_mac_vlan_add_resp_code {
+ HCLGE_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */
+ HCLGE_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */
+};
+
#define HCLGE_MAC_VLAN_BIT0_EN_B 0
#define HCLGE_MAC_VLAN_BIT1_EN_B 1
#define HCLGE_MAC_EPORT_SW_EN_B 12
@@ -674,7 +695,6 @@ struct hclge_umv_spc_alc_cmd {
#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1)
#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
-#define HCLGE_MAC_ETHERTYPE_LLDP 0x88cc
struct hclge_mac_mgr_tbl_entry_cmd {
u8 flags;
@@ -872,7 +892,7 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
-#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x200 /* 512 byte */
+#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */
#define HCLGE_TYPE_CRQ 0
#define HCLGE_TYPE_CSQ 1
@@ -970,6 +990,25 @@ struct hclge_fd_ad_config_cmd {
u8 rsv2[8];
};
+struct hclge_get_m7_bd_cmd {
+ __le32 bd_num;
+ u8 rsv[20];
+};
+
+struct hclge_query_ppu_pf_other_int_dfx_cmd {
+ __le16 over_8bd_no_fe_qid;
+ __le16 over_8bd_no_fe_vf_id;
+ __le16 tso_mss_cmp_min_err_qid;
+ __le16 tso_mss_cmp_min_err_vf_id;
+ __le16 tso_mss_cmp_max_err_qid;
+ __le16 tso_mss_cmp_max_err_vf_id;
+ __le16 tx_rd_fbd_poison_qid;
+ __le16 tx_rd_fbd_poison_vf_id;
+ __le16 rx_rd_fbd_poison_qid;
+ __le16 rx_rd_fbd_poison_vf_id;
+ u8 rsv[4];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 1161361a973b..bac4ce13f6ae 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -325,6 +325,8 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hdev->tm_info.hw_pfc_map = pfc_map;
hdev->tm_info.pfc_en = pfc->pfc_en;
+ hclge_tm_pfc_info_update(hdev);
+
return hclge_pause_setup_hw(hdev, false);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index a9ffb57c4607..ab625c757a95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -61,9 +61,11 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
struct hclge_dbg_dfx_message *dfx_message,
- char *cmd_buf, int msg_num, int offset,
- enum hclge_opcode_type cmd)
+ const char *cmd_buf, int msg_num,
+ int offset, enum hclge_opcode_type cmd)
{
+#define BD_DATA_NUM 6
+
struct hclge_desc *desc_src;
struct hclge_desc *desc;
int bd_num, buf_len;
@@ -92,14 +94,16 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return;
}
- max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num;
+ max = (bd_num * BD_DATA_NUM) <= msg_num ?
+ (bd_num * BD_DATA_NUM) : msg_num;
desc = desc_src;
for (i = 0; i < max; i++) {
- (((i / 6) > 0) && ((i % 6) == 0)) ? desc++ : desc;
+ if (i > 0 && (i % BD_DATA_NUM) == 0)
+ desc++;
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
- dfx_message->message, desc->data[i % 6]);
+ dfx_message->message,
+ desc->data[i % BD_DATA_NUM]);
dfx_message++;
}
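The fixed loop above walks a flat message index across descriptors that each carry BD_DATA_NUM data words, advancing the descriptor pointer whenever the index crosses a multiple of BD_DATA_NUM. A standalone model of that indexing, with made-up data:

#include <stdio.h>

#define BD_DATA_NUM 6

struct desc { unsigned int data[BD_DATA_NUM]; };

int main(void)
{
	struct desc bds[2] = {
		{ { 0, 1, 2, 3, 4, 5 } },
		{ { 6, 7, 8, 9, 10, 11 } },
	};
	struct desc *d = bds;
	int i;

	for (i = 0; i < 2 * BD_DATA_NUM; i++) {
		if (i > 0 && (i % BD_DATA_NUM) == 0)
			d++;			/* next buffer descriptor */
		printf("msg %d -> 0x%x\n", i, d->data[i % BD_DATA_NUM]);
	}
	return 0;
}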
@@ -107,7 +111,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
kfree(desc_src);
}
-static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf)
+static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_dbg_bitmap_cmd *bitmap;
@@ -207,7 +211,7 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf)
dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
}
-static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf)
+static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
int msg_num;
@@ -395,7 +399,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -403,7 +407,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -412,9 +416,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
goto err_tm_pg_cmd_send;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n",
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
bp_to_qs_map_cmd->tc_id);
- dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n",
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
bp_to_qs_map_cmd->qs_bit_map);
@@ -473,7 +477,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
- dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
nq_to_qs_map->qset_id);
cmd = HCLGE_OPC_TM_PG_WEIGHT;
@@ -537,7 +541,8 @@ err_tm_cmd_send:
cmd, ret);
}
-static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
+static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
+ const char *cmd_buf)
{
struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
@@ -921,11 +926,67 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hdev->rst_stats.reset_cnt);
}
+void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
+{
+ struct hclge_desc *desc_src, *desc_tmp;
+ struct hclge_get_m7_bd_cmd *req;
+ struct hclge_desc desc;
+ u32 bd_num, buf_len;
+ int ret, i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);
+
+ req = (struct hclge_get_m7_bd_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "get firmware statistics bd number failed, ret=%d\n",
+ ret);
+ return;
+ }
+
+ bd_num = le32_to_cpu(req->bd_num);
+
+ buf_len = sizeof(struct hclge_desc) * bd_num;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ dev_err(&hdev->pdev->dev,
+ "allocate desc for get_m7_stats failed\n");
+ return;
+ }
+
+ desc_tmp = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
+ HCLGE_OPC_M7_STATS_INFO);
+ if (ret) {
+ kfree(desc_src);
+ dev_err(&hdev->pdev->dev,
+ "get firmware statistics failed, ret=%d\n", ret);
+ return;
+ }
+
+ for (i = 0; i < bd_num; i++) {
+ dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
+ le32_to_cpu(desc_tmp->data[0]),
+ le32_to_cpu(desc_tmp->data[1]),
+ le32_to_cpu(desc_tmp->data[2]));
+ dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
+ le32_to_cpu(desc_tmp->data[3]),
+ le32_to_cpu(desc_tmp->data[4]),
+ le32_to_cpu(desc_tmp->data[5]));
+
+ desc_tmp++;
+ }
+
+ kfree(desc_src);
+}
+
/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
* @hdev: pointer to struct hclge_dev
* @cmd_buf: string that contains offset and length
*/
-static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf)
+static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
+ const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
@@ -998,13 +1059,13 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n",
+ dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
(unsigned long)stats.time, rem_nsec / 1000,
stats.status);
}
}
-int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1029,6 +1090,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
+ } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
+ hclge_dbg_get_m7_stats_info(hdev);
} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
hclge_dbg_dump_ncl_config(hdev,
&cmd_buf[sizeof("dump ncl_config")]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 4ac80634c984..0a7243825e7b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -87,25 +87,25 @@ static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
static const struct hclge_hw_error hclge_igu_int[] = {
{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "rx_buf_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "tx_buf_underrun",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
@@ -413,13 +413,13 @@ static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
@@ -631,29 +631,20 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ /* sentinel */ }
};
-static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
- const struct hclge_hw_error *err,
- u32 err_sts)
+static void hclge_log_error(struct device *dev, char *reg,
+ const struct hclge_hw_error *err,
+ u32 err_sts, unsigned long *reset_requests)
{
- enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
- bool need_reset = false;
-
while (err->msg) {
if (err->int_msk & err_sts) {
dev_warn(dev, "%s %s found [error status=0x%x]\n",
reg, err->msg, err_sts);
- if (err->reset_level != HNAE3_NONE_RESET &&
- err->reset_level >= reset_level) {
- reset_level = err->reset_level;
- need_reset = true;
- }
+ if (err->reset_level &&
+ err->reset_level != HNAE3_NONE_RESET)
+ set_bit(err->reset_level, reset_requests);
}
err++;
}
- if (need_reset)
- return reset_level;
- else
- return HNAE3_NONE_RESET;
}
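With this change each decoded error contributes a bit to a shared reset-request bitmap instead of returning a single reset level, and the recovery path can later service the highest level set. A self-contained model of that flow, with illustrative levels and table entries:

#include <stdio.h>

enum reset_type { NONE_RESET, FUNC_RESET, GLOBAL_RESET, RESET_MAX };

struct hw_error {
	unsigned int int_msk;
	const char *msg;
	enum reset_type level;
};

static void log_error(const struct hw_error *err, unsigned int sts,
		      unsigned long *reset_requests)
{
	for (; err->msg; err++)
		if (err->int_msk & sts) {
			printf("%s found\n", err->msg);
			if (err->level != NONE_RESET)
				*reset_requests |= 1UL << err->level;
		}
}

int main(void)
{
	static const struct hw_error tbl[] = {
		{ 1U << 0, "buf_overflow",  GLOBAL_RESET },
		{ 1U << 1, "fifo_underrun", FUNC_RESET },
		{ 0, NULL, NONE_RESET },	/* sentinel */
	};
	unsigned long req = 0;
	int level;

	log_error(tbl, 0x3, &req);
	for (level = RESET_MAX - 1; level > NONE_RESET; level--)
		if (req & (1UL << level)) {
			printf("servicing reset level %d\n", level);
			break;
		}
	return 0;
}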
/* hclge_cmd_query_error: read the error information
@@ -673,19 +664,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
enum hclge_err_int_type int_type)
{
struct device *dev = &hdev->pdev->dev;
- int num = 1;
+ int desc_num = 1;
int ret;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
if (flag) {
desc[0].flag |= cpu_to_le16(flag);
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
- num = 2;
+ desc_num = 2;
}
if (w_num)
desc[0].data[w_num] = cpu_to_le32(int_type);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret)
dev_err(dev, "query error cmd failed (%d)\n", ret);
@@ -941,7 +932,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
- int num = 1;
+ int desc_num = 1;
int ret;
/* configure PPU error interrupts */
@@ -960,7 +951,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
- num = 2;
+ desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
@@ -978,7 +969,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
return -EINVAL;
}
- ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
return ret;
}
@@ -1069,12 +1060,51 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
return ret;
}
-#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
- do { \
- if (ae_dev->ops->set_default_reset_request) \
- ae_dev->ops->set_default_reset_request(ae_dev, \
- reset_type); \
- } while (0)
+/* hclge_query_bd_num: query number of buffer descriptors
+ * @hdev: pointer to struct hclge_dev
+ * @is_ras: true for ras, false for msix
+ * @mpf_bd_num: number of main PF interrupt buffer descriptors
+ * @pf_bd_num: number of non-main PF interrupt buffer descriptors
+ *
+ * This function queries the number of mpf and pf buffer descriptors.
+ */
+static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
+ int *mpf_bd_num, int *pf_bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_min_bd_num, pf_min_bd_num;
+ enum hclge_opcode_type opcode;
+ struct hclge_desc desc_bd;
+ int ret;
+
+ if (is_ras) {
+ opcode = HCLGE_QUERY_RAS_INT_STS_BD_NUM;
+ mpf_min_bd_num = HCLGE_MPF_RAS_INT_MIN_BD_NUM;
+ pf_min_bd_num = HCLGE_PF_RAS_INT_MIN_BD_NUM;
+ } else {
+ opcode = HCLGE_QUERY_MSIX_INT_STS_BD_NUM;
+ mpf_min_bd_num = HCLGE_MPF_MSIX_INT_MIN_BD_NUM;
+ pf_min_bd_num = HCLGE_PF_MSIX_INT_MIN_BD_NUM;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc_bd, opcode, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "fail(%d) to query msix int status bd num\n",
+ ret);
+ return ret;
+ }
+
+ *mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
+ *pf_bd_num = le32_to_cpu(desc_bd.data[1]);
+ if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) {
+ dev_err(dev, "Invalid bd num: mpf(%d), pf(%d)\n",
+ *mpf_bd_num, *pf_bd_num);
+ return -EINVAL;
+ }
+
+ return 0;
+}
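Callers of this helper size one descriptor buffer for the larger of the two counts and reuse it for both the main-PF and per-PF queries. A sketch of that sizing and the minimum-count check, with made-up values and an assumed descriptor size:

#include <stdio.h>
#include <stdlib.h>

#define DESC_SIZE	32	/* assumed descriptor size, sketch only */
#define MPF_MIN_BD_NUM	10
#define PF_MIN_BD_NUM	4

int main(void)
{
	unsigned int mpf_bd_num = 10, pf_bd_num = 4;	/* as read from firmware */
	unsigned int bd_num;
	void *desc;

	/* reject counts below the documented minimums, as the helper does */
	if (mpf_bd_num < MPF_MIN_BD_NUM || pf_bd_num < PF_MIN_BD_NUM)
		return 1;

	/* one buffer sized for the larger query is reused for both */
	bd_num = mpf_bd_num > pf_bd_num ? mpf_bd_num : pf_bd_num;
	desc = calloc(bd_num, DESC_SIZE);
	if (!desc)
		return 1;
	printf("allocated %u descriptors (%u bytes)\n",
	       bd_num, bd_num * DESC_SIZE);
	free(desc);
	return 0;
}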
/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
* @hdev: pointer to struct hclge_dev
@@ -1089,7 +1119,6 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
int num)
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
- enum hnae3_reset_type reset_level;
struct device *dev = &hdev->pdev->dev;
__le32 *desc_data;
u32 status;
@@ -1098,8 +1127,6 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* query all main PF RAS errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret) {
dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
@@ -1108,95 +1135,74 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* log HNS common errors */
status = le32_to_cpu(desc[0].data[0]);
- if (status) {
- reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
- &hclge_imp_tcm_ecc_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+ &hclge_imp_tcm_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[1]);
- if (status) {
- reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
- &hclge_cmdq_nic_mem_ecc_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+ &hclge_cmdq_nic_mem_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
- if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
+ if ((le32_to_cpu(desc[0].data[2])) & BIT(0))
dev_warn(dev, "imp_rd_data_poison_err found\n");
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
- }
status = le32_to_cpu(desc[0].data[3]);
- if (status) {
- reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
- &hclge_tqp_int_ecc_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+ &hclge_tqp_int_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[4]);
- if (status) {
- reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
- &hclge_msix_sram_ecc_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "MSIX_ECC_INT_STS",
+ &hclge_msix_sram_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log SSU(Storage Switch Unit) errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*(desc_data + 2));
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
- &hclge_ssu_mem_ecc_err_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
+ &hclge_ssu_mem_ecc_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
- &hclge_ssu_com_err_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+ &hclge_ssu_com_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log IGU(Ingress Unit) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "IGU_INT_STS",
- &hclge_igu_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "IGU_INT_STS",
+ &hclge_igu_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log PPP(Programmable Packet Process) errors */
desc_data = (__le32 *)&desc[4];
status = le32_to_cpu(*(desc_data + 1));
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
- &hclge_ppp_mpf_abnormal_int_st1[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+ &hclge_ppp_mpf_abnormal_int_st1[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
- &hclge_ppp_mpf_abnormal_int_st3[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppp_mpf_abnormal_int_st3[0], status,
+ &ae_dev->hw_err_reset_req);
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[5];
@@ -1204,66 +1210,53 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
if (status) {
dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
"rpu_rx_pkt_ecc_mbit_err");
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
status = le32_to_cpu(*(desc_data + 2));
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
- &hclge_ppu_mpf_abnormal_int_st2[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
- &hclge_ppu_mpf_abnormal_int_st3[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppu_mpf_abnormal_int_st3[0], status,
+ &ae_dev->hw_err_reset_req);
/* log TM(Traffic Manager) errors */
desc_data = (__le32 *)&desc[6];
status = le32_to_cpu(*desc_data);
- if (status) {
- reset_level = hclge_log_error(dev, "TM_SCH_RINT",
- &hclge_tm_sch_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "TM_SCH_RINT",
+ &hclge_tm_sch_rint[0], status,
+ &ae_dev->hw_err_reset_req);
/* log QCN(Quantized Congestion Control) errors */
desc_data = (__le32 *)&desc[7];
status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
- &hclge_qcn_fifo_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "QCN_FIFO_RINT",
+ &hclge_qcn_fifo_rint[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
- &hclge_qcn_ecc_rint[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "QCN_ECC_RINT",
+ &hclge_qcn_ecc_rint[0], status,
+ &ae_dev->hw_err_reset_req);
/* log NCSI errors */
desc_data = (__le32 *)&desc[9];
status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
- &hclge_ncsi_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+ &hclge_ncsi_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* clear all main PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
@@ -1285,7 +1278,6 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
- enum hnae3_reset_type reset_level;
__le32 *desc_data;
u32 status;
int ret;
@@ -1293,8 +1285,6 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* query all PF RAS errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret) {
dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
@@ -1303,53 +1293,41 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* log SSU(Storage Switch Unit) errors */
status = le32_to_cpu(desc[0].data[0]);
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
- &hclge_ssu_port_based_err_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[1]);
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
- &hclge_ssu_fifo_overflow_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+ &hclge_ssu_fifo_overflow_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[2]);
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
- &hclge_ssu_ets_tcg_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_ETS_TCG_INT",
+ &hclge_ssu_ets_tcg_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
- &hclge_igu_egu_tnl_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+ &hclge_igu_egu_tnl_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
- &hclge_ppu_pf_abnormal_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
+ &hclge_ppu_pf_abnormal_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
@@ -1359,24 +1337,16 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
{
- struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
- struct hclge_desc desc_bd;
struct hclge_desc *desc;
int ret;
/* query the number of registers in the RAS int status */
- hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
- if (ret) {
- dev_err(dev, "fail(%d) to query ras int status bd num\n", ret);
+ ret = hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num);
+ if (ret)
return ret;
- }
- mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
- pf_bd_num = le32_to_cpu(desc_bd.data[1]);
- bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -1396,6 +1366,66 @@ static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
return ret;
}
+static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[3];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE AXI error sts\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
+ le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
+ le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
+ dev_info(dev, "AXI3: %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
+ le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
+
+ return 0;
+}
+
+static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
+ HCLGE_CMD_FLAG_NEXT, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
+ le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
+
+ return 0;
+}
+
static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
@@ -1403,8 +1433,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
int ret;
/* read overflow error status */
- ret = hclge_cmd_query_error(hdev, &desc[0],
- HCLGE_ROCEE_PF_RAS_INT_CMD,
+ ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
0, 0, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
@@ -1464,19 +1493,27 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
status = le32_to_cpu(desc[0].data[0]);
- if (status & HCLGE_ROCEE_RERR_INT_MASK) {
- dev_warn(dev, "ROCEE RAS AXI rresp error\n");
- reset_type = HNAE3_FUNC_RESET;
- }
+ if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
+ if (status & HCLGE_ROCEE_RERR_INT_MASK)
+ dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+
+ if (status & HCLGE_ROCEE_BERR_INT_MASK)
+ dev_warn(dev, "ROCEE RAS AXI bresp error\n");
- if (status & HCLGE_ROCEE_BERR_INT_MASK) {
- dev_warn(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET;
+
+ ret = hclge_log_rocee_axi_error(hdev);
+ if (ret)
+ return HNAE3_GLOBAL_RESET;
}
if (status & HCLGE_ROCEE_ECC_INT_MASK) {
dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
reset_type = HNAE3_GLOBAL_RESET;
+
+ ret = hclge_log_rocee_ecc_error(hdev);
+ if (ret)
+ return HNAE3_GLOBAL_RESET;
}
if (status & HCLGE_ROCEE_OVF_INT_MASK) {
@@ -1486,7 +1523,6 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
/* reset everything for now */
return HNAE3_GLOBAL_RESET;
}
- reset_type = HNAE3_FUNC_RESET;
}
/* clear error status */
@@ -1501,7 +1537,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
return reset_type;
}
-static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
@@ -1539,7 +1575,7 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
if (reset_type != HNAE3_NONE_RESET)
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
+ set_bit(reset_type, &ae_dev->hw_err_reset_req);
}
static const struct hclge_hw_blk hw_blk[] = {
@@ -1574,10 +1610,9 @@ static const struct hclge_hw_blk hw_blk[] = {
{ /* sentinel */ }
};
-int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state)
{
const struct hclge_hw_blk *module = hw_blk;
- struct device *dev = &hdev->pdev->dev;
int ret = 0;
while (module->name) {
@@ -1589,10 +1624,6 @@ int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
module++;
}
- ret = hclge_config_rocee_ras_interrupt(hdev, state);
- if (ret)
- dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);
-
return ret;
}
@@ -1602,165 +1633,281 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
struct device *dev = &hdev->pdev->dev;
u32 status;
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "Can't recover - RAS error reported during dev init\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+ if (status & HCLGE_RAS_REG_NFE_MASK ||
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
+ ae_dev->hw_err_reset_req = 0;
+ else
+ goto out;
+
/* Handling Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
dev_warn(dev,
"HNS Non-Fatal RAS error(status=0x%x) identified\n",
status);
hclge_handle_all_ras_errors(hdev);
- } else {
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
- hdev->pdev->revision < 0x21) {
- ae_dev->override_pci_need_reset = 1;
- return PCI_ERS_RESULT_RECOVERED;
- }
}
- if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
- dev_warn(dev, "ROCEE uncorrected RAS error identified\n");
+ /* Handling Non-fatal Rocee RAS errors */
+ if (hdev->pdev->revision >= 0x21 &&
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
+ dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
}
- if (status & HCLGE_RAS_REG_NFE_MASK ||
- status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
- ae_dev->override_pci_need_reset = 0;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ goto out;
+
+ if (ae_dev->hw_err_reset_req)
return PCI_ERS_RESULT_NEED_RESET;
- }
- ae_dev->override_pci_need_reset = 1;
+out:
return PCI_ERS_RESULT_RECOVERED;
}
-int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
- unsigned long *reset_requests)
+static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc, bool is_mpf,
+ u32 bd_num)
+{
+ if (is_mpf)
+ desc[0].opcode =
+ cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT);
+ else
+ desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
+
+ desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+
+ return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
+}
+
+/* hclge_query_over_8bd_err_info: query information about over_8bd_nfe_err
+ * @hdev: pointer to struct hclge_dev
+ * @vf_id: Index of the virtual function with error
+ * @q_id: Physical index of the queue with error
+ *
+ * This function gets the specific index of the queue and function which
+ * cause over_8bd_nfe_err by using command. If vf_id is 0, it means the
+ * error is caused by the PF instead of a VF.
+ */
+static int hclge_query_over_8bd_err_info(struct hclge_dev *hdev, u16 *vf_id,
+ u16 *q_id)
+{
+ struct hclge_query_ppu_pf_other_int_dfx_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PPU_PF_OTHER_INT_DFX, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data;
+ *vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id);
+ *q_id = le16_to_cpu(req->over_8bd_no_fe_qid);
+
+ return 0;
+}
+
+/* hclge_handle_over_8bd_err: handle MSI-X error named over_8bd_nfe_err
+ * @hdev: pointer to struct hclge_dev
+ * @reset_requests: reset level that we need to trigger later
+ *
+ * over_8bd_nfe_err is a special MSI-X error because it may be caused by
+ * a VF; in that case, we need to trigger a VF reset. Otherwise, a PF
+ * reset is needed.
+ */
+static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
{
- struct hclge_mac_tnl_stats mac_tnl_stats;
struct device *dev = &hdev->pdev->dev;
- u32 mpf_bd_num, pf_bd_num, bd_num;
- enum hnae3_reset_type reset_level;
- struct hclge_desc desc_bd;
- struct hclge_desc *desc;
- __le32 *desc_data;
- u32 status;
+ u16 vf_id;
+ u16 q_id;
int ret;
- /* query the number of bds for the MSIx int status */
- hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id);
if (ret) {
- dev_err(dev, "fail(%d) to query msix int status bd num\n",
+ dev_err(dev, "fail(%d) to query over_8bd_no_fe info\n",
ret);
- return ret;
+ return;
}
- mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
- pf_bd_num = le32_to_cpu(desc_bd.data[1]);
- bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n",
+ vf_id, q_id);
- desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc)
- goto out;
+ if (vf_id) {
+ if (vf_id >= hdev->num_alloc_vport) {
+ dev_err(dev, "invalid vf id(%d)\n", vf_id);
+ return;
+ }
+
+ /* If another reset whose level is higher than
+ * HNAE3_VF_FUNC_RESET has already been requested, there is
+ * no need to trigger a VF reset here.
+ */
+ if (*reset_requests != 0)
+ return;
+ ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
+ if (ret)
+ dev_warn(dev, "inform reset to vf(%d) failed %d!\n",
vf_id, ret);
+ } else {
+ set_bit(HNAE3_FUNC_RESET, reset_requests);
+ }
+}
+
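A compact model of the dispatch above, with a hypothetical helper standing in for the mailbox call: vf_id == 0 pins the error on the PF (request a function reset), while a valid non-zero vf_id triggers a VF reset unless a stronger reset is already pending:

#include <stdio.h>

#define FUNC_RESET_BIT (1UL << 1)

static void inform_vf_reset(unsigned int vf_id)
{
	printf("asserting reset to vf %u\n", vf_id);
}

static void handle_over_8bd(unsigned int vf_id, unsigned int num_vport,
			    unsigned long *reset_requests)
{
	if (!vf_id) {
		*reset_requests |= FUNC_RESET_BIT;	/* PF caused it */
		return;
	}
	if (vf_id >= num_vport || *reset_requests)
		return;					/* invalid id, or superseded */
	inform_vf_reset(vf_id);
}

int main(void)
{
	unsigned long req = 0;

	handle_over_8bd(2, 8, &req);	/* VF 2 at fault -> VF reset */
	handle_over_8bd(0, 8, &req);	/* PF at fault -> request FUNC reset */
	printf("reset_requests = %#lx\n", req);
	return 0;
}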
+/* hclge_handle_mpf_msix_error: handle all main PF MSI-X errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @mpf_bd_num: number of extended command structures
+ * @reset_requests: record of the reset level that we need
+ *
+ * This function handles all the main PF MSI-X errors in the hw registers
+ * using command.
+ */
+static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int mpf_bd_num,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
/* query all main PF MSIx errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
if (ret) {
- dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
- ret);
- goto msi_error;
+ dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret);
+ return ret;
}
/* log MAC errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data);
- if (status) {
- reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
- &hclge_mac_afifo_tnl_int[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+ &hclge_mac_afifo_tnl_int[0], status,
+ reset_requests);
/* log PPU(RCB) MPF errors */
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
- &hclge_ppu_mpf_abnormal_int_st2[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
+ status);
/* clear all main PF MSIx errors */
- hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
+ if (ret)
+ dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
- if (ret) {
- dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
- ret);
- goto msi_error;
- }
+ return ret;
+}
+
+/* hclge_handle_pf_msix_error: handle all PF MSI-X errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @pf_bd_num: number of extended command structures
+ * @reset_requests: record of the reset level that we need
+ *
+ * This function handles all the PF MSI-X errors in the hw registers using
+ * command.
+ */
+static int hclge_handle_pf_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int pf_bd_num,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
/* query all PF MSIx errors */
- memset(desc, 0, bd_num * sizeof(struct hclge_desc));
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
if (ret) {
- dev_err(dev, "query all pf msix int cmd failed (%d)\n",
- ret);
- goto msi_error;
+ dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret);
+ return ret;
}
/* log SSU PF errors */
status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
- &hclge_ssu_port_based_pf_int[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_pf_int[0],
+ status, reset_requests);
/* read and log PPP PF errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*desc_data);
- if (status) {
- reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
- &hclge_ppp_pf_abnormal_int[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+ &hclge_ppp_pf_abnormal_int[0],
+ status, reset_requests);
/* log PPU(RCB) PF errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
- &hclge_ppu_pf_abnormal_int[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+ &hclge_ppu_pf_abnormal_int[0],
+ status, reset_requests);
+
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK;
+ if (status)
+ hclge_handle_over_8bd_err(hdev, reset_requests);
/* clear all PF MSIx errors */
- hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
+ if (ret)
+ dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
- if (ret) {
- dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
- ret);
+ return ret;
+}
+
+static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct hclge_mac_tnl_stats mac_tnl_stats;
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ u32 status;
+ int ret;
+
+ /* query the number of bds for the MSIx int status */
+ ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ goto out;
+
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out;
}
+ ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
+ reset_requests);
+ if (ret)
+ goto msi_error;
+
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+ ret = hclge_handle_pf_msix_error(hdev, desc, pf_bd_num, reset_requests);
+ if (ret)
+ goto msi_error;
+
/* query and clear mac tnl interruptions */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
true);
@@ -1783,7 +1930,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
ret = hclge_clear_mac_tnl_int(hdev);
if (ret)
dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
- set_bit(HNAE3_NONE_RESET, reset_requests);
}
msi_error:
@@ -1791,3 +1937,70 @@ msi_error:
out:
return ret;
}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "Can't handle - MSIx error reported during dev init\n");
+ return 0;
+ }
+
+ return hclge_handle_all_hw_msix_error(hdev, reset_requests);
+}
+
+void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_DESC_NO_DATA_LEN 8
+
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ u32 status;
+ int ret;
+
+ ae_dev->hw_err_reset_req = 0;
+ status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+ /* query the number of bds for the MSIx int status */
+ ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ return;
+
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return;
+
+ /* Clear HNS hw errors reported through msix */
+ memset(&desc[0].data[0], 0xFF, mpf_bd_num * sizeof(struct hclge_desc) -
+ HCLGE_DESC_NO_DATA_LEN);
+ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "fail(%d) to clear mpf msix int during init\n",
+ ret);
+ goto msi_error;
+ }
+
+ memset(&desc[0].data[0], 0xFF, pf_bd_num * sizeof(struct hclge_desc) -
+ HCLGE_DESC_NO_DATA_LEN);
+ ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
+ if (ret) {
+ dev_err(dev, "fail(%d) to clear pf msix int during init\n",
+ ret);
+ goto msi_error;
+ }
+
+ /* Handle Non-fatal HNS RAS errors */
+ if (status & HCLGE_RAS_REG_NFE_MASK) {
+ dev_warn(dev, "HNS hw error(RAS) identified during init\n");
+ hclge_handle_all_ras_errors(hdev);
+ }
+
+msi_error:
+ kfree(desc);
+}
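As a footnote to the init-time cleanup above, a standalone sketch of the memset arithmetic, assuming the descriptor layout implied by HCLGE_DESC_NO_DATA_LEN (an 8-byte header followed by 24 data bytes): the data area of the whole BD chain is pre-filled with 0xFF, presumably serving as an all-bits clear mask for the query-and-clear command, while the first descriptor's header is left for the setup call to fill in:

#include <stdio.h>
#include <string.h>

#define DESC_NO_DATA_LEN 8	/* header bytes in the first BD (assumed) */

struct desc {
	unsigned char hdr[DESC_NO_DATA_LEN];	/* opcode/flags/retval */
	unsigned char data[24];			/* six 32-bit status words */
};

int main(void)
{
	struct desc bds[4];
	size_t fill = sizeof(bds) - DESC_NO_DATA_LEN;

	memset(bds, 0, sizeof(bds));
	/* same bytes as memset(&desc[0].data[0], 0xFF, ...) in the diff */
	memset((unsigned char *)bds + DESC_NO_DATA_LEN, 0xFF, fill);
	printf("filled %zu of %zu bytes with 0xFF\n", fill, sizeof(bds));
	return 0;
}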
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 9645590c9294..7ea8bb28a0cb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -6,6 +6,11 @@
#include "hclge_main.h"
+#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
+#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4
+#define HCLGE_MPF_MSIX_INT_MIN_BD_NUM 10
+#define HCLGE_PF_MSIX_INT_MIN_BD_NUM 4
+
#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
@@ -47,9 +52,9 @@
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
-#define HCLGE_MAC_TNL_INT_EN GENMASK(7, 0)
-#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(7, 0)
-#define HCLGE_MAC_TNL_INT_CLR GENMASK(7, 0)
+#define HCLGE_MAC_TNL_INT_EN GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_CLR GENMASK(9, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
@@ -81,9 +86,10 @@
#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0)
#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
-#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
+#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK BIT(29)
#define HCLGE_PPU_PF_INT_RAS_MASK 0x18
-#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27
+#define HCLGE_PPU_PF_INT_MSIX_MASK 0x26
+#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01
#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0)
@@ -94,6 +100,7 @@
#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1
#define HCLGE_ROCEE_RERR_INT_MASK BIT(0)
#define HCLGE_ROCEE_BERR_INT_MASK BIT(1)
+#define HCLGE_ROCEE_AXI_ERR_INT_MASK GENMASK(1, 0)
#define HCLGE_ROCEE_ECC_INT_MASK BIT(2)
#define HCLGE_ROCEE_OVF_INT_MASK BIT(3)
#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
@@ -119,7 +126,9 @@ struct hclge_hw_error {
};
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
-int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
+void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d3b1f8cb1155..3fde5471e1c0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -27,14 +27,26 @@
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-#define HCLGE_BUF_SIZE_UNIT 256
+#define HCLGE_BUF_SIZE_UNIT 256U
+#define HCLGE_BUF_MUL_BY 2
+#define HCLGE_BUF_DIV_BY 2
+#define NEED_RESERVE_TC_NUM 2
+#define BUF_MAX_PERCENT 100
+#define BUF_RESERVE_PERCENT 90
+
+#define HCLGE_RESET_MAX_FAIL_CNT 5
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
u16 *allocated_size, bool is_alloc);
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr);
static struct hnae3_ae_algo ae_algo;
@@ -290,7 +302,7 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
{
.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
- .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
+ .ethter_type = cpu_to_le16(ETH_P_LLDP),
.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
.i_port_bitmap = 0x1,
@@ -437,8 +449,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_RX_STATUS,
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -446,7 +457,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
if (ret) {
dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
+ ret, i);
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
@@ -500,6 +511,7 @@ static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	/* each tqp has a TX queue and an RX queue */
return kinfo->num_tqps * (2);
}
@@ -528,7 +540,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
return buff;
}
-static u64 *hclge_comm_get_stats(void *comm_stats,
+static u64 *hclge_comm_get_stats(const void *comm_stats,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
@@ -552,8 +564,7 @@ static u8 *hclge_comm_get_strings(u32 stringset,
return buff;
for (i = 0; i < size; i++) {
- snprintf(buff, ETH_GSTRING_LEN,
- strs[i].desc);
+ snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
@@ -644,8 +655,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
return count;
}
-static void hclge_get_strings(struct hnae3_handle *handle,
- u32 stringset,
+static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
u8 *p = (char *)data;
@@ -653,21 +663,17 @@ static void hclge_get_strings(struct hnae3_handle *handle,
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(stringset,
- g_mac_stats_string,
- size,
- p);
+ p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+ size, p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_APP],
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -678,8 +684,7 @@ static void hclge_get_strings(struct hnae3_handle *handle,
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_PHY],
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -692,10 +697,8 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
- g_mac_stats_string,
- ARRAY_SIZE(g_mac_stats_string),
- data);
+ p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
@@ -726,6 +729,8 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
static int hclge_query_function_status(struct hclge_dev *hdev)
{
+#define HCLGE_QUERY_MAX_CNT 5
+
struct hclge_func_status_cmd *req;
struct hclge_desc desc;
int timeout = 0;
@@ -738,9 +743,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "query function status failed %d.\n",
- ret);
-
+ "query function status failed %d.\n", ret);
return ret;
}
@@ -748,7 +751,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
if (req->pf_state)
break;
usleep_range(1000, 2000);
- } while (timeout++ < 5);
+ } while (timeout++ < HCLGE_QUERY_MAX_CNT);
ret = hclge_parse_func_status(hdev, req);
@@ -800,7 +803,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
- hdev->num_msi = hdev->num_roce_msi +
+ hdev->num_msi = hdev->num_roce_msi +
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
@@ -1058,6 +1061,7 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
@@ -1076,7 +1080,7 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
u64 mac_addr_tmp;
- int i;
+ unsigned int i;
req = (struct hclge_cfg_param_cmd *)desc[0].data;
@@ -1138,7 +1142,8 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
struct hclge_cfg_param_cmd *req;
- int i, ret;
+ unsigned int i;
+ int ret;
for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
u32 offset = 0;
@@ -1204,7 +1209,8 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
struct hclge_cfg cfg;
- int ret, i;
+ unsigned int i;
+ int ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret) {
@@ -1226,8 +1232,10 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.hw_pfc_map = 0;
hdev->wanted_umv_size = cfg.umv_space;
- if (hnae3_dev_fd_supported(hdev))
+ if (hnae3_dev_fd_supported(hdev)) {
hdev->fd_en = true;
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ }
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
@@ -1265,8 +1273,8 @@ static int hclge_configure(struct hclge_dev *hdev)
return ret;
}
-static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
- int tso_mss_max)
+static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
+ unsigned int tso_mss_max)
{
struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
@@ -1352,8 +1360,9 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
req = (struct hclge_tqp_map_cmd *)desc.data;
req->tqp_id = cpu_to_le16(tqp_pid);
req->tqp_vf = func_id;
- req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
- 1 << HCLGE_TQP_MAP_EN_B;
+ req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
+ if (!is_pf)
+ req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
req->tqp_vid = cpu_to_le16(tqp_vid);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1457,11 +1466,6 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
return 0;
}
-static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
-{
- /* this would be initialized later */
-}
-
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
struct hnae3_handle *nic = &vport->nic;
@@ -1472,20 +1476,12 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
- if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
- ret = hclge_knic_setup(vport, num_tqps,
- hdev->num_tx_desc, hdev->num_rx_desc);
-
- if (ret) {
- dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
- ret);
- return ret;
- }
- } else {
- hclge_unic_setup(vport, num_tqps);
- }
+ ret = hclge_knic_setup(vport, num_tqps,
+ hdev->num_tx_desc, hdev->num_rx_desc);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
- return 0;
+ return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
@@ -1591,7 +1587,8 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
- int i, cnt = 0;
+ unsigned int i;
+ u32 cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
@@ -1604,7 +1601,8 @@ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
- int i, cnt = 0;
+ unsigned int i;
+ int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
@@ -1621,7 +1619,8 @@ static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
- int i, cnt = 0;
+ unsigned int i;
+ int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
@@ -1671,7 +1670,8 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
if (hnae3_dev_dcb_supported(hdev))
- shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
+ shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
+ hdev->dv_buf_size;
else
shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ hdev->dv_buf_size;
@@ -1689,7 +1689,8 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
if (hnae3_dev_dcb_supported(hdev)) {
buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
- - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+ - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
+ HCLGE_BUF_SIZE_UNIT);
} else {
buf_alloc->s_buf.self.high = aligned_mps +
HCLGE_NON_DCB_ADDITIONAL_BUF;
@@ -1697,14 +1698,18 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
}
if (hnae3_dev_dcb_supported(hdev)) {
+ hi_thrd = shared_buf - hdev->dv_buf_size;
+
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
+ / BUF_MAX_PERCENT;
+
if (tc_num)
- hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
- else
- hi_thrd = shared_buf - hdev->dv_buf_size;
+ hi_thrd = hi_thrd / tc_num;
- hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
+ hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
- lo_thrd = hi_thrd - aligned_mps / 2;
+ lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
} else {
hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
lo_thrd = aligned_mps;
@@ -1749,7 +1754,7 @@ static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
{
u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
- int i;
+ unsigned int i;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
@@ -1765,12 +1770,13 @@ static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
- priv->wl.low = max ? aligned_mps : 256;
+ priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
priv->wl.high = roundup(priv->wl.low + aligned_mps,
HCLGE_BUF_SIZE_UNIT);
} else {
priv->wl.low = 0;
- priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
+ priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
+ aligned_mps;
}
priv->buf_size = priv->wl.high + hdev->dv_buf_size;
@@ -1789,9 +1795,10 @@ static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
- if (hdev->hw_tc_map & BIT(i) &&
- !(hdev->tm_info.hw_pfc_map & BIT(i))) {
+ if (hdev->hw_tc_map & mask &&
+ !(hdev->tm_info.hw_pfc_map & mask)) {
/* Clear the no pfc TC private buffer */
priv->wl.low = 0;
priv->wl.high = 0;
@@ -1818,9 +1825,10 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
- if (hdev->hw_tc_map & BIT(i) &&
- hdev->tm_info.hw_pfc_map & BIT(i)) {
+ if (hdev->hw_tc_map & mask &&
+ hdev->tm_info.hw_pfc_map & mask) {
/* Reduce the number of pfc TC with private buffer */
priv->wl.low = 0;
priv->enable = 0;
@@ -1837,6 +1845,55 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
+static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+#define COMPENSATE_BUFFER 0x3C00
+#define COMPENSATE_HALF_MPS_NUM 5
+#define PRIV_WL_GAP 0x1800
+
+ u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ u32 tc_num = hclge_get_tc_num(hdev);
+ u32 half_mps = hdev->mps >> 1;
+ u32 min_rx_priv;
+ unsigned int i;
+
+ if (tc_num)
+ rx_priv = rx_priv / tc_num;
+
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
+
+ min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
+ COMPENSATE_HALF_MPS_NUM * half_mps;
+ min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
+ rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
+
+ if (rx_priv < min_rx_priv)
+ return false;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
+ priv->buf_size = rx_priv;
+ priv->wl.high = rx_priv - hdev->dv_buf_size;
+ priv->wl.low = priv->wl.high - PRIV_WL_GAP;
+ }
+
+ buf_alloc->s_buf.buf_size = 0;
+
+ return true;
+}
+
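
hclge_only_alloc_priv_buff above splits the remaining rx buffer evenly across the enabled TCs, keeps only 90% of the per-TC share when no more than NEED_RESERVE_TC_NUM TCs are enabled, and succeeds only if the rounded share still clears min_rx_priv. A worked sketch of that arithmetic with hypothetical byte values (the dv_buf and mps below are examples, not hardware numbers):

#include <stdint.h>
#include <stdio.h>

#define BUF_UNIT	256u
#define COMPENSATE	0x3C00u		/* COMPENSATE_BUFFER */
#define HALF_MPS_NUM	5u		/* COMPENSATE_HALF_MPS_NUM */
#define WL_GAP		0x1800u		/* PRIV_WL_GAP */

int main(void)
{
	/* hypothetical inputs mirroring the function's locals */
	uint32_t rx_priv = 32768, tc_num = 1, dv_buf = 8192, mps = 2048;

	rx_priv /= tc_num;
	if (tc_num <= 2)			/* NEED_RESERVE_TC_NUM */
		rx_priv = rx_priv * 90 / 100;	/* keep 10% headroom */
	rx_priv -= rx_priv % BUF_UNIT;		/* round_down */

	uint32_t min_rx = dv_buf + COMPENSATE + HALF_MPS_NUM * (mps >> 1);
	min_rx = (min_rx + BUF_UNIT - 1) / BUF_UNIT * BUF_UNIT;	/* round_up */

	/* prints rx_priv=29440 min=28672 fits=1 wl.high=21248 wl.low=15104 */
	printf("rx_priv=%u min=%u fits=%d wl.high=%u wl.low=%u\n",
	       rx_priv, min_rx, rx_priv >= min_rx,
	       rx_priv - dv_buf, rx_priv - dv_buf - WL_GAP);
	return 0;
}
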
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hdev: pointer to struct hclge_dev
* @buf_alloc: pointer to buffer calculation data
@@ -1856,6 +1913,9 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
return 0;
}
+ if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
+ return 0;
+
if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
return 0;
@@ -2153,7 +2213,6 @@ static int hclge_init_msi(struct hclge_dev *hdev)
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
-
if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
duplex = HCLGE_MAC_FULL;
@@ -2171,7 +2230,8 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
- hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+ if (duplex)
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
switch (speed) {
case HCLGE_MAC_SPEED_10M:
@@ -2261,7 +2321,8 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
req = (struct hclge_config_auto_neg_cmd *)desc.data;
- hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ if (enable)
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
req->cfg_an_cmd_flag = cpu_to_le32(flag);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2316,6 +2377,17 @@ static int hclge_restart_autoneg(struct hnae3_handle *handle)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
+static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
+ return hclge_set_autoneg_en(hdev, !halt);
+
+ return 0;
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2389,6 +2461,15 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
+ if (hdev->hw.mac.support_autoneg) {
+ ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Config mac autoneg fail ret=%d\n", ret);
+ return ret;
+ }
+ }
+
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
@@ -2423,7 +2504,8 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
- if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
schedule_work(&hdev->rst_service_task);
}
@@ -2458,7 +2540,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
- int mac_state;
+ unsigned int mac_state;
int link_stat;
if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
@@ -2508,6 +2590,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
static void hclge_update_port_capability(struct hclge_mac *mac)
{
+ /* update fec ability by speed */
+ hclge_convert_setting_fec(mac);
+
/* firmware cannot identify the backplane type; the media type
* read from the configuration can help deal with it
*/
@@ -2529,7 +2614,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
- struct hclge_sfp_info_cmd *resp = NULL;
+ struct hclge_sfp_info_cmd *resp;
struct hclge_desc desc;
int ret;
@@ -2580,6 +2665,11 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->speed_ability = le32_to_cpu(resp->speed_ability);
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
+ mac->speed_type = QUERY_ACTIVE_SPEED;
+ if (!resp->active_fec)
+ mac->fec_mode = 0;
+ else
+ mac->fec_mode = BIT(resp->active_fec);
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -2645,6 +2735,7 @@ static void hclge_service_timer(struct timer_list *t)
mod_timer(&hdev->service_timer, jiffies + HZ);
hdev->hw_stats.stats_timer++;
+ hdev->fd_arfs_expire_timer++;
hclge_task_schedule(hdev);
}
@@ -2693,19 +2784,11 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
- dev_info(&hdev->pdev->dev, "core reset interrupt\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
- *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
- hdev->rst_stats.core_rst_cnt++;
- return HCLGE_VECTOR0_EVENT_RST;
- }
-
/* check for vector0 msix event source */
if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
- dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
- msix_src_reg);
+ dev_info(&hdev->pdev->dev, "received event 0x%x\n",
+ msix_src_reg);
+ *clearval = msix_src_reg;
return HCLGE_VECTOR0_EVENT_ERR;
}
@@ -2717,8 +2800,11 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
}
/* print other vector0 event source */
- dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
- cmdq_src_reg, msix_src_reg);
+ dev_info(&hdev->pdev->dev,
+ "CMDQ INT status:0x%x, other INT status:0x%x\n",
+ cmdq_src_reg, msix_src_reg);
+ *clearval = msix_src_reg;
+
return HCLGE_VECTOR0_EVENT_OTHER;
}
@@ -2754,8 +2840,8 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ u32 clearval = 0;
u32 event_cause;
- u32 clearval;
hclge_enable_vector(&hdev->misc_vector, false);
event_cause = hclge_check_event_cause(hdev, &clearval);
@@ -2797,7 +2883,8 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
}
/* clear the source of interrupt if it is not cause by reset */
- if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
+ if (!clearval ||
+ event_cause == HCLGE_VECTOR0_EVENT_MBX) {
hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -2861,6 +2948,9 @@ int hclge_notify_client(struct hclge_dev *hdev,
struct hnae3_client *client = hdev->nic_client;
u16 i;
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
+ return 0;
+
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
@@ -2886,7 +2976,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
int ret = 0;
u16 i;
- if (!client)
+ if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
return 0;
if (!client->ops->reset_notify)
@@ -2923,10 +3013,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
break;
- case HNAE3_CORE_RESET:
- reg = HCLGE_GLOBAL_RESET_REG;
- reg_bit = HCLGE_CORE_RESET_BIT;
- break;
case HNAE3_FUNC_RESET:
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
@@ -3058,12 +3144,6 @@ static void hclge_do_reset(struct hclge_dev *hdev)
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Global Reset requested\n");
break;
- case HNAE3_CORE_RESET:
- val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
- hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
- dev_info(&pdev->dev, "Core Reset requested\n");
- break;
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
/* schedule again to check later */
@@ -3083,10 +3163,11 @@ static void hclge_do_reset(struct hclge_dev *hdev)
}
}
-static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr)
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+ struct hclge_dev *hdev = ae_dev->priv;
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
@@ -3110,16 +3191,10 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
rst_level = HNAE3_IMP_RESET;
clear_bit(HNAE3_IMP_RESET, addr);
clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
clear_bit(HNAE3_FUNC_RESET, addr);
} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
rst_level = HNAE3_GLOBAL_RESET;
clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
- clear_bit(HNAE3_FUNC_RESET, addr);
- } else if (test_bit(HNAE3_CORE_RESET, addr)) {
- rst_level = HNAE3_CORE_RESET;
- clear_bit(HNAE3_CORE_RESET, addr);
clear_bit(HNAE3_FUNC_RESET, addr);
} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
@@ -3147,9 +3222,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
case HNAE3_GLOBAL_RESET:
clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
break;
- case HNAE3_CORE_RESET:
- clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
- break;
default:
break;
}
@@ -3180,6 +3252,8 @@ static int hclge_reset_prepare_down(struct hclge_dev *hdev)
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
+#define HCLGE_RESET_SYNC_TIME 100
+
u32 reg_val;
int ret = 0;
@@ -3188,7 +3262,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* There is no mechanism for PF to know if VF has stopped IO;
* for now, just wait 100 ms for VF to stop IO
*/
- msleep(100);
+ msleep(HCLGE_RESET_SYNC_TIME);
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3208,7 +3282,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* There is no mechanism for PF to know if VF has stopped IO;
* for now, just wait 100 ms for VF to stop IO
*/
- msleep(100);
+ msleep(HCLGE_RESET_SYNC_TIME);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
hdev->rst_stats.flr_rst_cnt++;
@@ -3222,6 +3296,10 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
break;
}
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGE_RESET_SYNC_TIME);
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
+ HCLGE_NIC_CMQ_ENABLE);
dev_info(&hdev->pdev->dev, "prepare wait ok\n");
return ret;
@@ -3230,7 +3308,6 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5
-#define RESET_UPGRADE_DELAY_SEC 10
if (hdev->reset_pending) {
dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
@@ -3254,8 +3331,9 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
hclge_clear_reset_cause(hdev);
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
mod_timer(&hdev->reset_timer,
- jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
+ jiffies + HCLGE_RESET_INTERVAL);
return false;
}
@@ -3282,6 +3360,25 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
return ret;
}
+static int hclge_reset_stack(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+}
+
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
@@ -3325,19 +3422,8 @@ static void hclge_reset(struct hclge_dev *hdev)
goto err_reset;
rtnl_lock();
- ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- if (ret)
- goto err_reset_lock;
- ret = hclge_reset_ae_dev(hdev->ae_dev);
- if (ret)
- goto err_reset_lock;
-
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- goto err_reset_lock;
-
- ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ ret = hclge_reset_stack(hdev);
if (ret)
goto err_reset_lock;
@@ -3347,16 +3433,23 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset_lock;
+ rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
+ * times
+ */
+ if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
+ goto err_reset;
+
+ rtnl_lock();
+
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
if (ret)
goto err_reset_lock;
rtnl_unlock();
- ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- goto err_reset;
-
ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
if (ret)
goto err_reset;
@@ -3399,11 +3492,12 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
if (!handle)
handle = &hdev->vport[0].nic;
- if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
+ if (time_before(jiffies, (hdev->last_reset_time +
+ HCLGE_RESET_INTERVAL)))
return;
else if (hdev->default_reset_request)
hdev->reset_level =
- hclge_get_reset_level(hdev,
+ hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
hdev->reset_level = HNAE3_FUNC_RESET;
@@ -3432,13 +3526,14 @@ static void hclge_reset_timer(struct timer_list *t)
struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
dev_info(&hdev->pdev->dev,
- "triggering global reset in reset timer\n");
- set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+ "triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
/* check if there is any ongoing reset in the hardware. This status can
* be checked from reset_pending. If there is, we need to wait for the
* hardware to complete the reset.
@@ -3449,12 +3544,12 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
* now.
*/
hdev->last_reset_time = jiffies;
- hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_reset(hdev);
/* check if we got any *new* reset requests to be honored */
- hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_do_reset(hdev);
@@ -3521,6 +3616,11 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_port_info(hdev);
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
+ hclge_sync_vlan_filter(hdev);
+ if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
+ hclge_rfs_filter_expire(hdev);
+ hdev->fd_arfs_expire_timer = 0;
+ }
hclge_service_complete(hdev);
}
@@ -3614,29 +3714,28 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
struct hclge_rss_config_cmd *req;
+ unsigned int key_offset = 0;
struct hclge_desc desc;
- int key_offset;
+ int key_counts;
int key_size;
int ret;
+ key_counts = HCLGE_RSS_KEY_SIZE;
req = (struct hclge_rss_config_cmd *)desc.data;
- for (key_offset = 0; key_offset < 3; key_offset++) {
+ while (key_counts) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
false);
req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
- if (key_offset == 2)
- key_size =
- HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
- else
- key_size = HCLGE_RSS_HASH_KEY_NUM;
-
+ key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
memcpy(req->hash_key,
key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
+ key_counts -= key_size;
+ key_offset++;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
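
The rewritten loop above streams the RSS hash key to firmware one descriptor at a time, with key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts) covering the short final piece. A standalone sketch of the chunking, assuming for illustration a 40-byte key sent in 16-byte chunks:

#include <stdio.h>

#define KEY_SIZE	40	/* illustrative: HCLGE_RSS_KEY_SIZE */
#define CHUNK		16	/* illustrative: HCLGE_RSS_HASH_KEY_NUM */

int main(void)
{
	int key_counts = KEY_SIZE, key_offset = 0;

	while (key_counts) {
		int key_size = key_counts < CHUNK ? key_counts : CHUNK;

		/* one descriptor carries key[key_offset*CHUNK .. +key_size) */
		printf("desc %d: bytes %d..%d\n", key_offset,
		       key_offset * CHUNK, key_offset * CHUNK + key_size - 1);
		key_counts -= key_size;
		key_offset++;
	}
	return 0;
}

With those sizes the loop issues three commands (16 + 16 + 8 bytes), matching the fixed three-descriptor loop it replaces while no longer hard-coding the descriptor count.
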
@@ -3995,13 +4094,14 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
struct hclge_vport *vport = hdev->vport;
u8 *rss_indir = vport[0].rss_indirection_tbl;
u16 rss_size = vport[0].alloc_rss_size;
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
u8 *key = vport[0].rss_hash_key;
u8 hfunc = vport[0].rss_algo;
- u16 tc_offset[HCLGE_MAX_TC_NUM];
u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 tc_size[HCLGE_MAX_TC_NUM];
u16 roundup_size;
- int i, ret;
+ unsigned int i;
+ int ret;
ret = hclge_set_rss_indir_table(hdev, rss_indir);
if (ret)
@@ -4156,8 +4256,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
return 0;
}
-static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
- int vector,
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4174,8 +4273,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
-static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
- int vector,
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4196,8 +4294,7 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
if (ret)
dev_err(&handle->pdev->dev,
"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
- vector_id,
- ret);
+ vector_id, ret);
return ret;
}
@@ -4503,19 +4600,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
case 0:
return false;
case BIT(INNER_DST_MAC):
- for (i = 0; i < 6; i++) {
- calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
+ for (i = 0; i < ETH_ALEN; i++) {
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
rule->tuples_mask.dst_mac[i]);
- calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
rule->tuples_mask.dst_mac[i]);
}
return true;
case BIT(INNER_SRC_MAC):
- for (i = 0; i < 6; i++) {
- calc_x(key_x[5 - i], rule->tuples.src_mac[i],
+ for (i = 0; i < ETH_ALEN; i++) {
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
rule->tuples.src_mac[i]);
- calc_y(key_y[5 - i], rule->tuples.src_mac[i],
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
rule->tuples.src_mac[i]);
}
@@ -4551,19 +4648,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
return true;
case BIT(INNER_SRC_IP):
- calc_x(tmp_x_l, rule->tuples.src_ip[3],
- rule->tuples_mask.src_ip[3]);
- calc_y(tmp_y_l, rule->tuples.src_ip[3],
- rule->tuples_mask.src_ip[3]);
+ calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
+ calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
return true;
case BIT(INNER_DST_IP):
- calc_x(tmp_x_l, rule->tuples.dst_ip[3],
- rule->tuples_mask.dst_ip[3]);
- calc_y(tmp_y_l, rule->tuples.dst_ip[3],
- rule->tuples_mask.dst_ip[3]);
+ calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
+ calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
@@ -4617,7 +4714,7 @@ static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
{
u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
u8 cur_pos = 0, tuple_size, shift_bits;
- int i;
+ unsigned int i;
for (i = 0; i < MAX_META_DATA; i++) {
tuple_size = meta_data_key_info[i].key_length;
@@ -4659,7 +4756,8 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
u8 *cur_key_x, *cur_key_y;
- int i, ret, tuple_size;
+ unsigned int i;
+ int ret, tuple_size;
u8 meta_data_region;
memset(key_x, 0, sizeof(key_x));
@@ -4812,6 +4910,7 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
BIT(INNER_IP_TOS);
+	/* check whether the src/dst ip address is used */
if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
!tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
*unused |= BIT(INNER_SRC_IP);
@@ -4836,6 +4935,7 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
BIT(INNER_DST_PORT);
+	/* check whether the src/dst ip address is used */
if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
!usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
*unused |= BIT(INNER_SRC_IP);
@@ -4906,14 +5006,18 @@ static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
struct hclge_fd_rule *rule = NULL;
struct hlist_node *node2;
+ spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
if (rule->location >= location)
break;
}
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return rule && rule->location == location;
}
+/* must be called with fd_rule_lock held */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
struct hclge_fd_rule *new_rule,
u16 location,
@@ -4937,9 +5041,13 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
kfree(rule);
hdev->hclge_fd_rule_num--;
- if (!is_add)
- return 0;
+ if (!is_add) {
+ if (!hdev->hclge_fd_rule_num)
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ clear_bit(location, hdev->fd_bmap);
+ return 0;
+ }
} else if (!is_add) {
dev_err(&hdev->pdev->dev,
"delete fail, rule %d is inexistent\n",
@@ -4954,7 +5062,9 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
else
hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+ set_bit(location, hdev->fd_bmap);
hdev->hclge_fd_rule_num++;
+ hdev->fd_active_type = new_rule->rule_type;
return 0;
}
@@ -4969,14 +5079,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
case SCTP_V4_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
- rule->tuples.src_ip[3] =
+ rule->tuples.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
- rule->tuples_mask.src_ip[3] =
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
- rule->tuples.dst_ip[3] =
+ rule->tuples.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
- rule->tuples_mask.dst_ip[3] =
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
@@ -4995,14 +5105,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
break;
case IP_USER_FLOW:
- rule->tuples.src_ip[3] =
+ rule->tuples.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
- rule->tuples_mask.src_ip[3] =
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
- rule->tuples.dst_ip[3] =
+ rule->tuples.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
- rule->tuples_mask.dst_ip[3] =
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
@@ -5019,14 +5129,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
be32_to_cpu_array(rule->tuples.src_ip,
- fs->h_u.tcp_ip6_spec.ip6src, 4);
+ fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.src_ip,
- fs->m_u.tcp_ip6_spec.ip6src, 4);
+ fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples.dst_ip,
- fs->h_u.tcp_ip6_spec.ip6dst, 4);
+ fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.dst_ip,
- fs->m_u.tcp_ip6_spec.ip6dst, 4);
+ fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
rule->tuples_mask.src_port =
@@ -5042,14 +5152,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
break;
case IPV6_USER_FLOW:
be32_to_cpu_array(rule->tuples.src_ip,
- fs->h_u.usr_ip6_spec.ip6src, 4);
+ fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.src_ip,
- fs->m_u.usr_ip6_spec.ip6src, 4);
+ fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples.dst_ip,
- fs->h_u.usr_ip6_spec.ip6dst, 4);
+ fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.dst_ip,
- fs->m_u.usr_ip6_spec.ip6dst, 4);
+ fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
@@ -5112,6 +5222,36 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
return 0;
}
+/* must be called with fd_rule_lock held */
+static int hclge_fd_config_rule(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ int ret;
+
+ if (!rule) {
+ dev_err(&hdev->pdev->dev,
+ "The flow director rule is NULL\n");
+ return -EINVAL;
+ }
+
+	/* this never fails here, so there is no need to check the return value */
+ hclge_fd_update_rule_list(hdev, rule, rule->location, true);
+
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto clear_rule;
+
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto clear_rule;
+
+ return 0;
+
+clear_rule:
+ hclge_fd_update_rule_list(hdev, rule, rule->location, false);
+ return ret;
+}
+
static int hclge_add_fd_entry(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -5174,8 +5314,10 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
return -ENOMEM;
ret = hclge_fd_get_tuple(hdev, fs, rule);
- if (ret)
- goto free_rule;
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
rule->flow_type = fs->flow_type;
@@ -5184,24 +5326,19 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
rule->vf_id = dst_vport_id;
rule->queue_id = q_index;
rule->action = action;
+ rule->rule_type = HCLGE_FD_EP_ACTIVE;
- ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
- if (ret)
- goto free_rule;
+	/* to avoid rule conflicts, clear all arfs rules when the user
+	 * configures a rule via ethtool
+	 */
+ hclge_clear_arfs_rules(handle);
- ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
- if (ret)
- goto free_rule;
+ spin_lock_bh(&hdev->fd_rule_lock);
+ ret = hclge_fd_config_rule(hdev, rule);
- ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
- if (ret)
- goto free_rule;
+ spin_unlock_bh(&hdev->fd_rule_lock);
return ret;
-
-free_rule:
- kfree(rule);
- return ret;
}
static int hclge_del_fd_entry(struct hnae3_handle *handle,
@@ -5222,18 +5359,21 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev,
- "Delete fail, rule %d is inexistent\n",
- fs->location);
+ "Delete fail, rule %d is inexistent\n", fs->location);
return -ENOENT;
}
- ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- fs->location, NULL, false);
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
+ NULL, false);
if (ret)
return ret;
- return hclge_fd_update_rule_list(hdev, NULL, fs->location,
- false);
+ spin_lock_bh(&hdev->fd_rule_lock);
+ ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return ret;
}
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
@@ -5243,25 +5383,30 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node;
+ u16 location;
if (!hnae3_dev_fd_supported(hdev))
return;
+ spin_lock_bh(&hdev->fd_rule_lock);
+ for_each_set_bit(location, hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
+ NULL, false);
+
if (clear_list) {
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
rule_node) {
- hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- rule->location, NULL, false);
hlist_del(&rule->rule_node);
kfree(rule);
- hdev->hclge_fd_rule_num--;
}
- } else {
- hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
- rule_node)
- hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- rule->location, NULL, false);
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ hdev->hclge_fd_rule_num = 0;
+ bitmap_zero(hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -5283,6 +5428,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
if (!hdev->fd_en)
return 0;
+ spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
if (!ret)
@@ -5292,11 +5438,18 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
dev_warn(&hdev->pdev->dev,
"Restore rule %d failed, remove it\n",
rule->location);
+ clear_bit(rule->location, hdev->fd_bmap);
hlist_del(&rule->rule_node);
kfree(rule);
hdev->hclge_fd_rule_num--;
}
}
+
+ if (hdev->hclge_fd_rule_num)
+ hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return 0;
}
@@ -5329,13 +5482,18 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ spin_lock_bh(&hdev->fd_rule_lock);
+
hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
if (rule->location >= fs->location)
break;
}
- if (!rule || fs->location != rule->location)
+ if (!rule || fs->location != rule->location) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return -ENOENT;
+ }
fs->flow_type = rule->flow_type;
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
@@ -5343,16 +5501,16 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
fs->h_u.tcp_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[3]);
+ cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
fs->h_u.tcp_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[3]);
+ cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
fs->m_u.tcp_ip4_spec.psrc =
@@ -5372,16 +5530,16 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
break;
case IP_USER_FLOW:
fs->h_u.usr_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[3]);
+ cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
fs->h_u.usr_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[3]);
+ cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
fs->m_u.usr_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
fs->m_u.usr_ip4_spec.tos =
@@ -5400,20 +5558,22 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
- rule->tuples.src_ip, 4);
+ rule->tuples.src_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
+ sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, 4);
+ rule->tuples_mask.src_ip, IPV6_SIZE);
cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
- rule->tuples.dst_ip, 4);
+ rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
+ sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, 4);
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
fs->m_u.tcp_ip6_spec.psrc =
@@ -5428,20 +5588,22 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
break;
case IPV6_USER_FLOW:
cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
- rule->tuples.src_ip, 4);
+ rule->tuples.src_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ memset(fs->m_u.usr_ip6_spec.ip6src, 0,
+ sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, 4);
+ rule->tuples_mask.src_ip, IPV6_SIZE);
cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
- rule->tuples.dst_ip, 4);
+ rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
+ sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, 4);
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
fs->m_u.usr_ip6_spec.l4_proto =
@@ -5474,6 +5636,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
break;
default:
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}
@@ -5505,6 +5668,8 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
fs->ring_cookie |= vf_id;
}
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return 0;
}
@@ -5522,20 +5687,208 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node2,
&hdev->fd_rule_list, rule_node) {
- if (cnt == cmd->rule_cnt)
+ if (cnt == cmd->rule_cnt) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -EMSGSIZE;
+ }
rule_locs[cnt] = rule->location;
cnt++;
}
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
cmd->rule_cnt = cnt;
return 0;
}
+static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
+ struct hclge_fd_rule_tuples *tuples)
+{
+ tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
+ tuples->ip_proto = fkeys->basic.ip_proto;
+ tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
+ tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
+ } else {
+ memcpy(tuples->src_ip,
+ fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
+ sizeof(tuples->src_ip));
+ memcpy(tuples->dst_ip,
+ fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
+ sizeof(tuples->dst_ip));
+ }
+}
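
The src_ip/dst_ip fields written above are four-word arrays sized for IPv6, with an IPv4 address occupying only the last word, the same slot the IPV4_INDEX macro names elsewhere in this patch. A small illustrative layout sketch (the struct here is hypothetical, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define IPV6_WORDS 4
#define V4_SLOT    (IPV6_WORDS - 1)	/* the slot IPV4_INDEX (3) names */

/* hypothetical mirror of the address arrays in the rule tuples */
struct addr_tuples {
	uint32_t src_ip[IPV6_WORDS];	/* IPv6 uses all four words */
	uint32_t dst_ip[IPV6_WORDS];	/* IPv4 uses only the last one */
};

int main(void)
{
	struct addr_tuples t = { { 0 }, { 0 } };

	t.src_ip[V4_SLOT] = 0xC0A80001;	/* 192.168.0.1, host byte order */
	printf("v4 address lives in word %d: 0x%08X\n",
	       V4_SLOT, (unsigned)t.src_ip[V4_SLOT]);
	return 0;
}
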
+
+/* traverse all rules, checking whether an existing rule has the same tuples */
+static struct hclge_fd_rule *
+hclge_fd_search_flow_keys(struct hclge_dev *hdev,
+ const struct hclge_fd_rule_tuples *tuples)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
+ return rule;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
+ struct hclge_fd_rule *rule)
+{
+ rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_PORT);
+ rule->action = 0;
+ rule->vf_id = 0;
+ rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
+ if (tuples->ether_proto == ETH_P_IP) {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V4_FLOW;
+ else
+ rule->flow_type = UDP_V4_FLOW;
+ } else {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V6_FLOW;
+ else
+ rule->flow_type = UDP_V6_FLOW;
+ }
+ memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
+ memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
+}
+
+static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule_tuples new_tuples;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ u16 tmp_queue_id;
+ u16 bit_id;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ memset(&new_tuples, 0, sizeof(new_tuples));
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+	/* if an fd rule added by the user already exists,
+	 * arfs should not work
+	 */
+ if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -EOPNOTSUPP;
+ }
+
+	/* check whether a flow director filter exists for this flow;
+	 * if not, create a new filter for it;
+	 * if a filter exists with a different queue id, modify the filter;
+	 * if a filter exists with the same queue id, do nothing
+	 */
+ rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
+ if (!rule) {
+ bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
+ if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -ENOSPC;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -ENOMEM;
+ }
+
+ set_bit(bit_id, hdev->fd_bmap);
+ rule->location = bit_id;
+ rule->flow_id = flow_id;
+ rule->queue_id = queue_id;
+ hclge_fd_build_arfs_rule(&new_tuples, rule);
+ ret = hclge_fd_config_rule(hdev, rule);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (ret)
+ return ret;
+
+ return rule->location;
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (rule->queue_id == queue_id)
+ return rule->location;
+
+ tmp_queue_id = rule->queue_id;
+ rule->queue_id = queue_id;
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret) {
+ rule->queue_id = tmp_queue_id;
+ return ret;
+ }
+
+ return rule->location;
+}
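
Under fd_rule_lock the function above either finds an existing rule or allocates and inserts a new one; only the retarget of an existing rule happens after the lock is dropped. A generic find-or-create sketch of those three cases, with a plain list and a pthread mutex as illustrative stand-ins for the driver's hlist and spinlock:

#include <pthread.h>
#include <stdlib.h>

struct rule { struct rule *next; unsigned key, queue; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct rule *rules;

/* find-or-create: mirrors the create / do-nothing / retarget cases */
static int arfs_steer(unsigned key, unsigned queue)
{
	struct rule *r;

	pthread_mutex_lock(&lock);
	for (r = rules; r; r = r->next)
		if (r->key == key)
			break;
	if (!r) {				/* case 1: create */
		r = calloc(1, sizeof(*r));
		if (!r) {
			pthread_mutex_unlock(&lock);
			return -1;
		}
		r->key = key;
		r->queue = queue;
		r->next = rules;
		rules = r;
		pthread_mutex_unlock(&lock);
		return 0;	/* the driver would program the filter here */
	}
	pthread_mutex_unlock(&lock);

	if (r->queue == queue)			/* case 2: same queue */
		return 0;
	r->queue = queue;			/* case 3: retarget */
	return 0;	/* the driver would rewrite the hw action here */
}

int main(void)
{
	arfs_steer(0x1234, 3);	/* creates a rule */
	arfs_steer(0x1234, 3);	/* same queue: nothing to do */
	arfs_steer(0x1234, 5);	/* existing rule, new queue: retarget */
	return 0;
}
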
+
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ HLIST_HEAD(del_list);
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return;
+ }
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rps_may_expire_flow(handle->netdev, rule->queue_id,
+ rule->flow_id, rule->location)) {
+ hlist_del_init(&rule->rule_node);
+ hlist_add_head(&rule->rule_node, &del_list);
+ hdev->hclge_fd_rule_num--;
+ clear_bit(rule->location, hdev->fd_bmap);
+ }
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ kfree(rule);
+ }
+#endif
+}
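
hclge_rfs_filter_expire above keeps the spinlock hold time short: expired rules are only unlinked onto a private del_list under fd_rule_lock, and the slower TCAM commands and kfree() calls run after the lock is dropped. A generic sketch of that detach-then-process pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* detach matching nodes under the lock, then do the slow work unlocked */
static void expire(struct node **list, int (*expired)(int id))
{
	struct node *del = NULL, **pp, *n;

	pthread_mutex_lock(&lock);
	for (pp = list; (n = *pp) != NULL; ) {
		if (expired(n->id)) {
			*pp = n->next;		/* unlink from the live list */
			n->next = del;		/* park on the private list */
			del = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&lock);

	while ((n = del) != NULL) {		/* lock already released */
		del = n->next;
		printf("would clear hw entry %d\n", n->id);
		free(n);
	}
}

static int odd(int id) { return id & 1; }

int main(void)
{
	struct node *list = NULL;

	for (int i = 1; i <= 4; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = list;
		list = n;
	}
	expire(&list, odd);	/* removes ids 1 and 3 */
	return 0;
}
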
+
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
+ hclge_del_all_fd_entries(handle, true);
+#endif
+}
+
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -5565,10 +5918,12 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ bool clear;
hdev->fd_en = enable;
+ clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE ? true : false;
if (!enable)
- hclge_del_all_fd_entries(handle, false);
+ hclge_del_all_fd_entries(handle, clear);
else
hclge_restore_fd_entries(handle);
}
@@ -5582,20 +5937,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+
+ if (enable) {
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
+ }
+
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -5726,7 +6081,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
return -EBUSY;
}
-static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
+static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
struct hclge_desc desc;
@@ -5737,7 +6092,8 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
- req->enable |= enable << HCLGE_TQP_ENABLE_B;
+ if (enable)
+ req->enable |= 1U << HCLGE_TQP_ENABLE_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -5838,6 +6194,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ hclge_clear_arfs_rules(handle);
+
/* If it is not PF reset, the firmware will disable the MAC,
 * so it only needs to stop the phy here.
*/
@@ -5903,11 +6261,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (op == HCLGE_MAC_VLAN_ADD) {
if ((!resp_code) || (resp_code == 1)) {
return_status = 0;
- } else if (resp_code == 2) {
+ } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for uc_overflow.\n");
- } else if (resp_code == 3) {
+ } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for mc_overflow.\n");
@@ -5952,13 +6310,15 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
- int word_num;
- int bit_num;
+#define HCLGE_VF_NUM_IN_FIRST_DESC 192
+
+ unsigned int word_num;
+ unsigned int bit_num;
if (vfid > 255 || vfid < 0)
return -EIO;
- if (vfid >= 0 && vfid <= 191) {
+ if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
@@ -5966,7 +6326,7 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
- word_num = (vfid - 192) / 32;
+ word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
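As a worked check of the vfid-to-descriptor mapping above: the first 192 vf ids (HCLGE_VF_NUM_IN_FIRST_DESC) land in desc[1], the remainder in desc[2], 32 ids per 32-bit word. The helper below is illustrative only, not part of the patch.

static inline void demo_vfid_pos(unsigned int vfid, unsigned int *desc_idx,
				 unsigned int *word, unsigned int *bit)
{
	if (vfid < 192) {		/* HCLGE_VF_NUM_IN_FIRST_DESC */
		*desc_idx = 1;
		*word = vfid / 32;
	} else {
		*desc_idx = 2;
		*word = (vfid - 192) / 32;
	}
	*bit = vfid % 32;
}

/* e.g. vfid 100 -> desc[1].data[3] bit 4; vfid 200 -> desc[2].data[0] bit 8 */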
@@ -6149,6 +6509,10 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
mutex_init(&hdev->umv_mutex);
hdev->max_umv_size = allocated_size;
+ /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
+ * reserve some unicast mac vlan table entries to be shared by the pf
+ * and its vfs.
+ */
hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_req_vfs + 2);
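To make the split concrete: with allocated_size = 256 entries and num_req_vfs = 6, the divisor num_req_vfs + 2 is 8, so priv_umv_size = 256 / 8 = 32 private entries per function, and share_umv_size = 32 + (256 % 8) = 32, i.e. one extra quota plus the rounding remainder, kept as a common pool that the pf and its vfs draw from once their private quota is exhausted.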
@@ -6181,7 +6545,9 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
req = (struct hclge_umv_spc_alc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
- hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
+ if (!is_alloc)
+ hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
+
req->space_size = cpu_to_le32(space_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -6270,8 +6636,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
is_multicast_ether_addr(addr)) {
dev_err(&hdev->pdev->dev,
"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
- addr,
- is_zero_ether_addr(addr),
+ addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
@@ -6338,9 +6703,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
- dev_dbg(&hdev->pdev->dev,
- "Remove mac err! invalid mac:%pM.\n",
- addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
+ addr);
return -EINVAL;
}
@@ -6381,18 +6745,16 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
- if (!status) {
- /* This mac addr exist, update VFID for it */
- hclge_update_desc_vfid(desc, vport->vport_id, false);
- status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- } else {
+ if (status) {
 /* This mac addr does not exist, add a new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data));
memset(desc[2].data, 0, sizeof(desc[0].data));
- hclge_update_desc_vfid(desc, vport->vport_id, false);
- status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}
+ status = hclge_update_desc_vfid(desc, vport->vport_id, false);
+ if (status)
+ return status;
+ status = hclge_add_mac_vlan_tbl(vport, &req, desc);
if (status == -ENOSPC)
dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
@@ -6430,7 +6792,9 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
 /* This mac addr exists, remove this handle's VFID for it */
- hclge_update_desc_vfid(desc, vport->vport_id, true);
+ status = hclge_update_desc_vfid(desc, vport->vport_id, true);
+ if (status)
+ return status;
if (hclge_is_all_function_id_zero(desc))
/* All the vfid is zero, so need to delete this entry */
@@ -6759,7 +7123,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan, u8 qos,
__be16 proto)
{
@@ -6771,6 +7135,12 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
u8 vf_byte_off;
int ret;
+ /* if the vf vlan table is full, the firmware will disable the vf vlan
+ * filter; it is then useless and unnecessary to add a new vlan id
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
+ return 0;
+
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
hclge_cmd_setup_basic_desc(&desc[1],
@@ -6806,6 +7176,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
return 0;
if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
return 0;
@@ -6819,12 +7190,13 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
if (!req0->resp_code)
return 0;
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
- dev_warn(&hdev->pdev->dev,
- "vlan %d filter is not in vf vlan table\n",
- vlan);
+ /* vf vlan filter is disabled when the vf vlan table is full,
+ * so a new vlan id is never added to the vf vlan table.
+ * Just return 0 without a warning, to avoid massive verbose
+ * logs at unload time.
+ */
+ if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
- }
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%d.\n",
@@ -7140,10 +7512,6 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
{
struct hclge_vport_vlan_cfg *vlan;
- /* vlan 0 is reserved */
- if (!vlan_id)
- return;
-
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
return;
@@ -7238,6 +7606,43 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
mutex_unlock(&hdev->vport_cfg_mutex);
}
+static void hclge_restore_vlan_table(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ u16 vlan_proto, qos;
+ u16 state, vlan_id;
+ int i;
+
+ mutex_lock(&hdev->vport_cfg_mutex);
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
+ vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ state = vport->port_base_vlan_cfg.state;
+
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id, vlan_id, qos,
+ false);
+ continue;
+ }
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->hd_tbl_status)
+ hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, 0,
+ false);
+ }
+ }
+
+ mutex_unlock(&hdev->vport_cfg_mutex);
+}
+
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -7415,11 +7820,20 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
- /* when port based VLAN enabled, we use port based VLAN as the VLAN
- * filter entry. In this case, we don't update VLAN filter table
- * when user add new VLAN or remove exist VLAN, just update the vport
- * VLAN list. The VLAN id in VLAN list won't be writen in VLAN filter
- * table until port based VLAN disabled
+ /* When the device is resetting, the firmware is unable to handle the
+ * mailbox. Just record the vlan id, and remove it after the
+ * reset finishes.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ return -EBUSY;
+ }
+
+ /* When port based vlan is enabled, we use the port based vlan as the
+ * vlan filter entry. In this case, we don't update the vlan filter table
+ * when the user adds a new vlan or removes an existing one, just the
+ * vport vlan list. The vlan ids in the vlan list won't be written into
+ * the vlan filter table until port based vlan is disabled
*/
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
@@ -7427,16 +7841,53 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
writen_to_tbl = true;
}
- if (ret)
- return ret;
+ if (!ret) {
+ if (is_kill)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ else
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+ } else if (is_kill) {
+ /* When removing the hw vlan filter fails, record the vlan id,
+ * and try to remove it from hw later, to stay consistent
+ * with the stack
+ */
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ }
+ return ret;
+}
- if (is_kill)
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
- else
- hclge_add_vport_vlan_table(vport, vlan_id,
- writen_to_tbl);
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_SYNC_COUNT 60
- return 0;
+ int i, ret, sync_cnt = 0;
+ u16 vlan_id;
+
+ /* sync the delete-failed vlans of every vport; vport 0 is the PF */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id, vlan_id,
+ 0, true);
+ if (ret && ret != -EINVAL)
+ return;
+
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+
+ sync_cnt++;
+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
+ return;
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ }
+ }
}
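hclge_sync_vlan_filter() drains each vport's vlan_del_fail_bmap with a bounded budget (HCLGE_MAX_SYNC_COUNT) so that a single service-task pass cannot monopolize the command channel; whatever is left over is retried on the next pass, and any failure other than -EINVAL aborts the pass early. The core drain loop, reduced to a sketch with assumed demo_* names:

static void demo_sync_failed_vlans(struct demo_vport *vport, int budget)
{
	u16 vid = find_first_bit(vport->del_fail_bmap, VLAN_N_VID);

	while (vid != VLAN_N_VID && budget-- > 0) {
		if (demo_hw_del_vlan(vport, vid))	/* assumed helper */
			return;				/* retry next pass */

		clear_bit(vid, vport->del_fail_bmap);
		vid = find_first_bit(vport->del_fail_bmap, VLAN_N_VID);
	}
}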
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
@@ -7463,7 +7914,7 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
struct hclge_dev *hdev = vport->back;
- int i, max_frm_size, ret = 0;
+ int i, max_frm_size, ret;
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
@@ -7523,7 +7974,8 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
- hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
+ if (enable)
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -7574,7 +8026,7 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
- int ret = 0;
+ int ret;
queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
@@ -7591,7 +8043,6 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
return ret;
}
- reset_try_times = 0;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
/* Wait for tqp hw reset */
msleep(20);
@@ -7630,7 +8081,6 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
return;
}
- reset_try_times = 0;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
/* Wait for tqp hw reset */
msleep(20);
@@ -7700,7 +8150,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
u16 remote_advertising = 0;
- u16 local_advertising = 0;
+ u16 local_advertising;
u32 rx_pause, tx_pause;
u8 flowctl;
@@ -7733,8 +8183,9 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
- *auto_neg = hclge_get_autoneg(handle);
+ *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
*rx_en = 0;
@@ -7765,11 +8216,13 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
struct phy_device *phydev = hdev->hw.mac.phydev;
u32 fc_autoneg;
- fc_autoneg = hclge_get_autoneg(handle);
- if (auto_neg != fc_autoneg) {
- dev_info(&hdev->pdev->dev,
- "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
- return -EOPNOTSUPP;
+ if (phydev) {
+ fc_autoneg = hclge_get_autoneg(handle);
+ if (auto_neg != fc_autoneg) {
+ dev_info(&hdev->pdev->dev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
}
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
@@ -7780,16 +8233,13 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
- if (!fc_autoneg)
+ if (!auto_neg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
if (phydev)
return phy_start_aneg(phydev);
- if (hdev->pdev->revision == 0x20)
- return -EOPNOTSUPP;
-
- return hclge_restart_autoneg(handle);
+ return -EOPNOTSUPP;
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
@@ -7825,7 +8275,8 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct phy_device *phydev = hdev->hw.mac.phydev;
- int mdix_ctrl, mdix, retval, is_resolved;
+ int mdix_ctrl, mdix, is_resolved;
+ unsigned int retval;
if (!phydev) {
*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
@@ -7894,6 +8345,102 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "PF info end.\n");
}
+static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_client *client = vport->nic.client;
+ struct hclge_dev *hdev = ae_dev->priv;
+ int rst_cnt;
+ int ret;
+
+ rst_cnt = hdev->rst_stats.reset_cnt;
+ ret = client->ops->init_instance(&vport->nic);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_nic_err;
+ }
+
+ /* Enable nic hw error interrupts */
+ ret = hclge_config_nic_hw_error(hdev, true);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable hw error interrupts\n", ret);
+ goto init_nic_err;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ if (netif_msg_drv(&hdev->vport->nic))
+ hclge_info_show(hdev);
+
+ return ret;
+
+init_nic_err:
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ client->ops->uninit_instance(&vport->nic, 0);
+
+ return ret;
+}
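hclge_init_nic_client_instance() (and its roce counterpart below) guards the long init_instance() call with a reset-race check: snapshot rst_stats.reset_cnt first, then treat either an in-flight reset (HCLGE_STATE_RST_HANDLING) or a changed counter afterwards as a lost race and unwind with -EBUSY. The skeleton of that pattern, with illustrative names:

static int demo_init_with_reset_check(struct demo_dev *dev)
{
	u32 snap = dev->reset_cnt;
	int ret;

	ret = demo_do_init(dev);		/* assumed, may sleep */
	if (ret)
		return ret;

	/* a reset started or completed while we were initializing */
	if (test_bit(DEMO_RST_HANDLING, &dev->state) ||
	    snap != dev->reset_cnt) {
		demo_undo_init(dev);		/* assumed unwind helper */
		return -EBUSY;
	}

	return 0;
}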
+
+static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_client *client = vport->roce.client;
+ struct hclge_dev *hdev = ae_dev->priv;
+ int rst_cnt;
+ int ret;
+
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
+
+ client = hdev->roce_client;
+ ret = hclge_init_roce_base_info(vport);
+ if (ret)
+ return ret;
+
+ rst_cnt = hdev->rst_stats.reset_cnt;
+ ret = client->ops->init_instance(&vport->roce);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_roce_err;
+ }
+
+ /* Enable roce ras interrupts */
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable roce ras interrupts\n", ret);
+ goto init_roce_err;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+
+init_roce_err:
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+
+ return ret;
+}
+
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -7909,41 +8456,13 @@ static int hclge_init_client_instance(struct hnae3_client *client,
hdev->nic_client = client;
vport->nic.client = client;
- ret = client->ops->init_instance(&vport->nic);
+ ret = hclge_init_nic_client_instance(ae_dev, vport);
if (ret)
goto clear_nic;
- hnae3_set_client_init_flag(client, ae_dev, 1);
-
- if (netif_msg_drv(&hdev->vport->nic))
- hclge_info_show(hdev);
-
- if (hdev->roce_client &&
- hnae3_dev_roce_supported(hdev)) {
- struct hnae3_client *rc = hdev->roce_client;
-
- ret = hclge_init_roce_base_info(vport);
- if (ret)
- goto clear_roce;
-
- ret = rc->ops->init_instance(&vport->roce);
- if (ret)
- goto clear_roce;
-
- hnae3_set_client_init_flag(hdev->roce_client,
- ae_dev, 1);
- }
-
- break;
- case HNAE3_CLIENT_UNIC:
- hdev->nic_client = client;
- vport->nic.client = client;
-
- ret = client->ops->init_instance(&vport->nic);
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
if (ret)
- goto clear_nic;
-
- hnae3_set_client_init_flag(client, ae_dev, 1);
+ goto clear_roce;
break;
case HNAE3_CLIENT_ROCE:
@@ -7952,17 +8471,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
vport->roce.client = client;
}
- if (hdev->roce_client && hdev->nic_client) {
- ret = hclge_init_roce_base_info(vport);
- if (ret)
- goto clear_roce;
-
- ret = client->ops->init_instance(&vport->roce);
- if (ret)
- goto clear_roce;
-
- hnae3_set_client_init_flag(client, ae_dev, 1);
- }
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
break;
default:
@@ -7970,7 +8481,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
- return 0;
+ return ret;
clear_nic:
hdev->nic_client = NULL;
@@ -7992,6 +8503,10 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
vport = &hdev->vport[i];
if (hdev->roce_client) {
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
hdev->roce_client->ops->uninit_instance(&vport->roce,
0);
hdev->roce_client = NULL;
@@ -8000,6 +8515,10 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (client->type == HNAE3_CLIENT_ROCE)
return;
if (hdev->nic_client && client->ops->uninit_instance) {
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
@@ -8081,6 +8600,7 @@ static void hclge_state_init(struct hclge_dev *hdev)
static void hclge_state_uninit(struct hclge_dev *hdev)
{
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ set_bit(HCLGE_STATE_REMOVING, &hdev->state);
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
@@ -8122,6 +8642,23 @@ static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
+static void hclge_clear_resetting_state(struct hclge_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "clear vf(%d) rst failed %d!\n",
+ vport->vport_id, ret);
+ }
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -8143,6 +8680,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_init(&hdev->vport_lock);
mutex_init(&hdev->vport_cfg_mutex);
+ spin_lock_init(&hdev->fd_rule_lock);
ret = hclge_pci_init(hdev);
if (ret) {
@@ -8270,13 +8808,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- ret = hclge_hw_error_set_state(hdev, true);
- if (ret) {
- dev_err(&pdev->dev,
- "fail(%d) to enable hw error interrupts\n", ret);
- goto err_mdiobus_unreg;
- }
-
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -8288,6 +8819,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
hclge_clear_all_event_cause(hdev);
+ hclge_clear_resetting_state(hdev);
+
+ /* Log and clear the hw errors that have already occurred */
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
+ /* request a delayed reset for the error recovery, because an immediate
+ * global reset on one PF may affect the pending initialization of other PFs
+ */
+ if (ae_dev->hw_err_reset_req) {
+ enum hnae3_reset_type reset_level;
+
+ reset_level = hclge_get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ hclge_set_def_reset_request(ae_dev, reset_level);
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
+ }
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
@@ -8342,6 +8889,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_stats_clear(hdev);
memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
ret = hclge_cmd_init(hdev);
if (ret) {
@@ -8393,21 +8941,31 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_init_fd_config(hdev);
if (ret) {
- dev_err(&pdev->dev,
- "fd table init fail, ret=%d\n", ret);
+ dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
return ret;
}
/* Re-enable the hw error interrupts because
- * the interrupts get disabled on core/global reset.
+ * the interrupts get disabled on global reset.
*/
- ret = hclge_hw_error_set_state(hdev, true);
+ ret = hclge_config_nic_hw_error(hdev, true);
if (ret) {
dev_err(&pdev->dev,
- "fail(%d) to re-enable HNS hw error interrupts\n", ret);
+ "fail(%d) to re-enable NIC hw error interrupts\n",
+ ret);
return ret;
}
+ if (hdev->roce_client) {
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail(%d) to re-enable roce ras interrupts\n",
+ ret);
+ return ret;
+ }
+ }
+
hclge_reset_vport_state(hdev);
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
@@ -8432,8 +8990,11 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
+ /* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
- hclge_hw_error_set_state(hdev, false);
+ hclge_config_nic_hw_error(hdev, false);
+ hclge_config_rocee_ras_interrupt(hdev, false);
+
hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -8478,15 +9039,16 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
int cur_rss_size = kinfo->rss_size;
int cur_tqps = kinfo->num_tqps;
- u16 tc_offset[HCLGE_MAX_TC_NUM];
u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 tc_size[HCLGE_MAX_TC_NUM];
u16 roundup_size;
u32 *rss_indir;
- int ret, i;
+ unsigned int i;
+ int ret;
kinfo->req_rss_size = new_tqps_num;
@@ -8571,10 +9133,12 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+#define HCLGE_32_BIT_DESC_NODATA_LEN 2
struct hclge_desc *desc;
u32 *reg_val = data;
__le32 *desc_data;
+ int nodata_num;
int cmd_num;
int i, k, n;
int ret;
@@ -8582,7 +9146,9 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
if (regs_num == 0)
return 0;
- cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
+ nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
+ HCLGE_32_BIT_REG_RTN_DATANUM);
desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -8599,7 +9165,7 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
for (i = 0; i < cmd_num; i++) {
if (i == 0) {
desc_data = (__le32 *)(&desc[i].data[0]);
- n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
} else {
desc_data = (__le32 *)(&desc[i]);
n = HCLGE_32_BIT_REG_RTN_DATANUM;
@@ -8621,10 +9187,12 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+#define HCLGE_64_BIT_DESC_NODATA_LEN 1
struct hclge_desc *desc;
u64 *reg_val = data;
__le64 *desc_data;
+ int nodata_len;
int cmd_num;
int i, k, n;
int ret;
@@ -8632,7 +9200,9 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
if (regs_num == 0)
return 0;
- cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
+ nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
+ HCLGE_64_BIT_REG_RTN_DATANUM);
desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -8649,7 +9219,7 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
for (i = 0; i < cmd_num; i++) {
if (i == 0) {
desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
} else {
desc_data = (__le64 *)(&desc[i]);
n = HCLGE_64_BIT_REG_RTN_DATANUM;
@@ -8876,6 +9446,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.restart_autoneg = hclge_restart_autoneg,
+ .halt_autoneg = hclge_halt_autoneg,
.get_pauseparam = hclge_get_pauseparam,
.set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
@@ -8892,6 +9463,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .get_reset_level = hclge_get_reset_level,
.set_default_reset_request = hclge_set_def_reset_request,
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,
@@ -8908,6 +9480,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
+ .add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
@@ -8918,6 +9491,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_timer_task = hclge_set_timer_task,
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
+ .restore_vlan_table = hclge_restore_vlan_table,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index dd06b11187b0..6a12285f4c76 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -201,6 +201,8 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_DOWN,
HCLGE_STATE_DISABLED,
HCLGE_STATE_REMOVING,
+ HCLGE_STATE_NIC_REGISTERED,
+ HCLGE_STATE_ROCE_REGISTERED,
HCLGE_STATE_SERVICE_INITED,
HCLGE_STATE_SERVICE_SCHED,
HCLGE_STATE_RST_SERVICE_SCHED,
@@ -472,6 +474,7 @@ enum HCLGE_FD_KEY_TYPE {
enum HCLGE_FD_STAGE {
HCLGE_FD_STAGE_1,
HCLGE_FD_STAGE_2,
+ MAX_STAGE_NUM,
};
/* OUTER_XXX indicates tuples in tunnel header of tunnel packet
@@ -526,7 +529,7 @@ enum HCLGE_FD_META_DATA {
struct key_info {
u8 key_type;
- u8 key_length;
+ u8 key_length; /* in bits */
};
static const struct key_info meta_data_key_info[] = {
@@ -578,6 +581,16 @@ static const struct key_info tuple_key_info[] = {
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32
+/* assigned by firmware; the actual filter number for each pf may be less */
+#define MAX_FD_FILTER_NUM 4096
+#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL 5
+
+enum HCLGE_FD_ACTIVE_RULE_TYPE {
+ HCLGE_FD_RULE_NONE,
+ HCLGE_FD_ARFS_ACTIVE,
+ HCLGE_FD_EP_ACTIVE,
+};
+
enum HCLGE_FD_PACKET_TYPE {
NIC_PACKET,
ROCE_PACKET,
@@ -600,18 +613,23 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
u8 fd_mode;
- u16 max_key_length;
+ u16 max_key_length; /* in bits */
u32 proto_support;
- u32 rule_num[2]; /* rule entry number */
- u16 cnt_num[2]; /* rule hit counter number */
- struct hclge_fd_key_cfg key_cfg[2];
+ u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
+ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
+ struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
};
+#define IPV4_INDEX 3
+#define IPV6_SIZE 4
struct hclge_fd_rule_tuples {
- u8 src_mac[6];
- u8 dst_mac[6];
- u32 src_ip[4];
- u32 dst_ip[4];
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ /* Compatible with the ip addresses of both ipv4 and ipv6.
+ * An ipv4 address is stored in src/dst_ip[3].
+ */
+ u32 src_ip[IPV6_SIZE];
+ u32 dst_ip[IPV6_SIZE];
u16 src_port;
u16 dst_port;
u16 vlan_tag1;
@@ -630,6 +648,8 @@ struct hclge_fd_rule {
u16 vf_id;
u16 queue_id;
u16 location;
+ u16 flow_id; /* only used for arfs */
+ enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
};
struct hclge_fd_ad_data {
@@ -679,6 +699,20 @@ struct hclge_mac_tnl_stats {
u32 status;
};
+#define HCLGE_RESET_INTERVAL (10 * HZ)
+#define HCLGE_WAIT_RESET_DONE 100
+
+#pragma pack(1)
+struct hclge_vf_vlan_cfg {
+ u8 mbx_cmd;
+ u8 subcode;
+ u8 is_kill;
+ u16 vlan;
+ u16 proto;
+};
+
+#pragma pack()
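The #pragma pack(1) matters because hclge_vf_vlan_cfg is overlaid directly onto the raw mailbox bytes in hclge_set_vf_vlan_cfg() (msg[0] cmd, msg[1] subcode, msg[2] is_kill, msg[3..4] vlan, msg[5..6] proto); without packing, the two u16 members would be padded to even offsets and the overlay would shift. A compile-time check, not part of this patch, would pin that assumption (on kernels without static_assert(), a BUILD_BUG_ON() in function scope does the same):

static_assert(sizeof(struct hclge_vf_vlan_cfg) == 7,
	      "mailbox vlan message must stay byte-packed");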
+
/* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below:
* ----------------------------------
@@ -806,10 +840,15 @@ struct hclge_dev {
struct hclge_vlan_type_cfg vlan_type_cfg;
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
+ spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
u16 hclge_fd_rule_num;
+ u16 fd_arfs_expire_timer;
+ unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
+ enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
u16 wanted_umv_size;
@@ -891,13 +930,14 @@ struct hclge_vport {
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
u16 used_umv_num;
- int vport_id;
+ u16 vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
@@ -959,7 +999,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
-int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 0e04e63f2a94..a38ac7cfe16b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -29,6 +29,10 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
"PF fail to gen resp to VF len %d exceeds max len %d\n",
resp_data_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ /* If resp_data_len is too long, clamp it to the max length
+ * and still return the msg to the VF
+ */
+ resp_data_len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
@@ -93,7 +97,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
else if (hdev->reset_type == HNAE3_FLR_RESET)
reset_type = HNAE3_VF_FULL_RESET;
else
- return -EINVAL;
+ reset_type = HNAE3_VF_FUNC_RESET;
memcpy(&msg_data[0], &reset_type, sizeof(u16));
@@ -192,12 +196,10 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
return ret;
ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
- if (ret)
- return ret;
hclge_free_vector_ring_chain(&ring_chain);
- return 0;
+ return ret;
}
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
@@ -308,21 +310,23 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
+ struct hclge_vf_vlan_cfg *msg_cmd;
int status = 0;
- if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+ msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
+ if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto;
bool is_kill;
- is_kill = !!mbx_req->msg[2];
- memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
- memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
+ is_kill = !!msg_cmd->is_kill;
+ vlan = msg_cmd->vlan;
+ proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
- } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
+ } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
- bool en = mbx_req->msg[2] ? true : false;
+ bool en = msg_cmd->is_kill;
status = hclge_en_hw_strip_rxvtag(handle, en);
} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
@@ -365,13 +369,14 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u8 vf_tc_map = 0;
- int i, ret;
+ unsigned int i;
+ int ret;
for (i = 0; i < kinfo->num_tc; i++)
vf_tc_map |= BIT(i);
ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
- sizeof(u8));
+ sizeof(vf_tc_map));
return ret;
}
@@ -553,7 +558,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
struct hclge_desc *desc;
- int ret, flag;
+ unsigned int flag;
+ int ret;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 1e8134892d77..abb1b438564e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -55,9 +55,9 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
+ HCLGE_MDIO_PHYID_S, (u32)phyid);
hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ HCLGE_MDIO_PHYREG_S, (u32)regnum);
hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
@@ -93,9 +93,9 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
+ HCLGE_MDIO_PHYID_S, (u32)phyid);
hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ HCLGE_MDIO_PHYREG_S, (u32)regnum);
hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
@@ -224,6 +224,13 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_and(phydev->supported, phydev->supported, mask);
linkmode_copy(phydev->advertising, phydev->supported);
+ /* The supported flag includes Pause and Asym Pause, but the default
+ * advertising should be rx on, tx on, so clear Asym Pause from the
+ * advertising flag
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index a7bbb6d3091a..3f41fa2bc414 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -43,18 +43,23 @@ enum hclge_shaper_level {
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
+#define DIVISOR_CLK (1000 * 8)
+#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
+
const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
 6 * 256, /* Priority level */
 6 * 32, /* Priority group level */
6 * 8, /* Port level */
6 * 256 /* Qset level */
};
- u8 ir_u_calc = 0, ir_s_calc = 0;
+ u8 ir_u_calc = 0;
+ u8 ir_s_calc = 0;
u32 ir_calc;
u32 tick;
/* Calc tick */
- if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
+ if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
+ ir > HCLGE_ETHER_MAX_RATE)
return -EINVAL;
tick = tick_array[shaper_level];
@@ -66,7 +71,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
* ir_calc = ---------------- * 1000
* tick * 1
*/
- ir_calc = (1008000 + (tick >> 1) - 1) / tick;
+ ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
if (ir_calc == ir) {
*ir_b = 126;
@@ -78,27 +83,28 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Increasing the denominator to select ir_s value */
while (ir_calc > ir) {
ir_s_calc++;
- ir_calc = 1008000 / (tick * (1 << ir_s_calc));
+ ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
}
if (ir_calc == ir)
*ir_b = 126;
else
- *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
+ *ir_b = (ir * tick * (1 << ir_s_calc) +
+ (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
} else {
/* Increasing the numerator to select ir_u value */
u32 numerator;
while (ir_calc < ir) {
ir_u_calc++;
- numerator = 1008000 * (1 << ir_u_calc);
+ numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
ir_calc = (numerator + (tick >> 1)) / tick;
}
if (ir_calc == ir) {
*ir_b = 126;
} else {
- u32 denominator = (8000 * (1 << --ir_u_calc));
+ u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc));
*ir_b = (ir * tick + (denominator >> 1)) / denominator;
}
}
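The (ir_b, ir_u, ir_s) search above can be desk-checked with a direct userspace transcription; the constants mirror the patch, input validation is omitted, and HCLGE_ETHER_MAX_RATE is assumed to be 100000 (Mbit/s) as defined in hclge_tm.h.

#include <stdint.h>
#include <stdio.h>

#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)

static void shaper_calc(uint32_t ir, uint16_t tick,
			uint8_t *ir_b, uint8_t *ir_u, uint8_t *ir_s)
{
	uint8_t ir_u_calc = 0, ir_s_calc = 0;
	uint32_t ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;
		return;
	}

	if (ir_calc > ir) {
		/* rate too high: grow the denominator via ir_s */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}
		*ir_b = (ir_calc == ir) ? 126 :
			(ir * tick * (1 << ir_s_calc) +
			 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* rate too low: grow the numerator via ir_u */
		uint32_t numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}
		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			uint32_t denominator = DIVISOR_CLK * (1 << --ir_u_calc);

			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;
}

int main(void)
{
	uint8_t b, u, s;

	shaper_calc(100000, 6 * 8, &b, &u, &s);	/* 100G at port-level tick */
	printf("ir_b=%u ir_u=%u ir_s=%u\n", b, u, s);
	return 0;
}

Under these assumptions the program prints ir_b=150 ir_u=2 ir_s=0 for the port-level 100000 Mbit/s case.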
@@ -119,14 +125,13 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev,
opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
return -EINVAL;
- for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
+ for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
- if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
}
+ hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
+
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
if (ret)
return ret;
@@ -219,8 +224,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
trans_gap = pause_param->pause_trans_gap;
trans_time = le16_to_cpu(pause_param->pause_trans_time);
- return hclge_pause_param_cfg(hdev, mac_addr, trans_gap,
- trans_time);
+ return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
@@ -361,29 +365,36 @@ static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
+ u8 bs_b, u8 bs_s)
+{
+ u32 shapping_para = 0;
+
+ hclge_tm_set_field(shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shapping_para, BS_S, bs_s);
+
+ return shapping_para;
+}
+
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pg_id,
- u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
+ u32 shapping_para)
{
struct hclge_pg_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
- u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
- HCLGE_OPC_TM_PG_C_SHAPPING;
+ HCLGE_OPC_TM_PG_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, opcode, false);
shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
shap_cfg_cmd->pg_id = pg_id;
- hclge_tm_set_field(shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shapping_para, BS_B, bs_b);
- hclge_tm_set_field(shapping_para, BS_S, bs_s);
-
shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -397,7 +408,7 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
u8 ir_u, ir_b, ir_s;
int ret;
- ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
+ ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
HCLGE_SHAPER_LVL_PORT,
&ir_b, &ir_u, &ir_s);
if (ret)
@@ -406,11 +417,9 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
- hclge_tm_set_field(shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
- hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);
+ shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
@@ -419,16 +428,14 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pri_id,
- u8 ir_b, u8 ir_u, u8 ir_s,
- u8 bs_b, u8 bs_s)
+ u32 shapping_para)
{
struct hclge_pri_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
- u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
- HCLGE_OPC_TM_PRI_C_SHAPPING;
+ HCLGE_OPC_TM_PRI_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, opcode, false);
@@ -436,12 +443,6 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_id = pri_id;
- hclge_tm_set_field(shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shapping_para, BS_B, bs_b);
- hclge_tm_set_field(shapping_para, BS_S, bs_s);
-
shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -531,6 +532,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
max_rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc);
+ /* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
@@ -538,6 +540,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+ /* Set to the maximum specification value (max_rss_size). */
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size;
@@ -595,8 +598,10 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
hdev->tm_info.prio_tc[i] =
(i >= hdev->tm_info.num_tc) ? 0 : i;
- /* DCB is enabled if we have more than 1 TC */
- if (hdev->tm_info.num_tc > 1)
+ /* DCB is enabled if we have more than 1 TC or pfc_en is
+ * non-zero.
+ */
+ if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
else
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
@@ -604,12 +609,14 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
+#define BW_PERCENT 100
+
u8 i;
for (i = 0; i < hdev->tm_info.num_pg; i++) {
int k;
- hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;
+ hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
hdev->tm_info.pg_info[i].pg_id = i;
hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
@@ -621,7 +628,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
- hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
}
}
@@ -682,6 +689,7 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
u8 ir_u, ir_b, ir_s;
+ u32 shaper_para;
int ret;
u32 i;
@@ -699,18 +707,21 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_C_BUCKET, i,
- 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ shaper_para);
if (ret)
return ret;
+ shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_P_BUCKET, i,
- ir_b, ir_u, ir_s,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ shaper_para);
if (ret)
return ret;
}
@@ -730,8 +741,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
/* pg to prio */
for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Cfg dwrr */
- ret = hclge_tm_pg_weight_cfg(hdev, i,
- hdev->tm_info.pg_dwrr[i]);
+ ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
if (ret)
return ret;
}
@@ -811,6 +821,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
u8 ir_u, ir_b, ir_s;
+ u32 shaper_para;
int ret;
u32 i;
@@ -822,17 +833,19 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_tm_pri_shapping_cfg(
- hdev, HCLGE_TM_SHAP_C_BUCKET, i,
- 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
+ shaper_para);
if (ret)
return ret;
- ret = hclge_tm_pri_shapping_cfg(
- hdev, HCLGE_TM_SHAP_P_BUCKET, i,
- ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
+ shaper_para);
if (ret)
return ret;
}
@@ -844,6 +857,7 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
u8 ir_u, ir_b, ir_s;
+ u32 shaper_para;
int ret;
ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
@@ -851,18 +865,19 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
if (ret)
return ret;
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
- vport->vport_id,
- 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ vport->vport_id, shaper_para);
if (ret)
return ret;
+ shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
- vport->vport_id,
- ir_b, ir_u, ir_s,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ vport->vport_id, shaper_para);
if (ret)
return ret;
@@ -964,7 +979,7 @@ static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
struct hclge_ets_tc_weight_cmd *ets_weight;
struct hclge_desc desc;
- int i;
+ unsigned int i;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
@@ -1124,6 +1139,9 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
int ret;
u8 i;
+ if (vport->vport_id >= HNAE3_MAX_TC)
+ return -EINVAL;
+
ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
if (ret)
return ret;
@@ -1212,8 +1230,8 @@ static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
struct hclge_mac *mac = &hdev->hw.mac;
return hclge_pause_param_cfg(hdev, mac->mac_addr,
- HCLGE_DEFAULT_PAUSE_TRANS_GAP,
- HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
@@ -1358,7 +1376,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
- u8 i, bit_map = 0;
+ u8 bit_map = 0;
+ u8 i;
hdev->tm_info.num_tc = num_tc;
@@ -1375,6 +1394,19 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hclge_tm_schd_info_init(hdev);
}
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+{
+ /* DCB is enabled if we have more than 1 TC or pfc_en is
+ * non-zero.
+ */
+ if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
+ hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+ else
+ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+
+ hclge_pfc_info_init(hdev);
+}
+
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
int ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index f60e540c7a62..818610988d34 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -12,7 +12,7 @@
#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
-#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0x7F
#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
/* SP or DWRR */
@@ -147,6 +147,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init);
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev);
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index 6193f8fa7cf3..53804d95ea90 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -6,4 +6,4 @@
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 71f356fc2446..652b796044e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -98,7 +98,6 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
@@ -110,7 +109,6 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
@@ -179,6 +177,38 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}
+static int hclgevf_cmd_convert_err_code(u16 desc_ret)
+{
+ switch (desc_ret) {
+ case HCLGEVF_CMD_EXEC_SUCCESS:
+ return 0;
+ case HCLGEVF_CMD_NO_AUTH:
+ return -EPERM;
+ case HCLGEVF_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HCLGEVF_CMD_QUEUE_FULL:
+ return -EXFULL;
+ case HCLGEVF_CMD_NEXT_ERR:
+ return -ENOSR;
+ case HCLGEVF_CMD_UNEXE_ERR:
+ return -ENOTBLK;
+ case HCLGEVF_CMD_PARA_ERR:
+ return -EINVAL;
+ case HCLGEVF_CMD_RESULT_ERR:
+ return -ERANGE;
+ case HCLGEVF_CMD_TIMEOUT:
+ return -ETIME;
+ case HCLGEVF_CMD_HILINK_ERR:
+ return -ENOLINK;
+ case HCLGEVF_CMD_QUEUE_ILLEGAL:
+ return -ENXIO;
+ case HCLGEVF_CMD_INVALID:
+ return -EBADR;
+ default:
+ return -EIO;
+ }
+}
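Mapping every firmware status onto a distinct errno lets callers branch on standard codes instead of comparing raw descriptor return values. An illustrative caller-side sketch (not from the patch):

static int demo_send_optional_cfg(struct hclgevf_hw *hw,
				  struct hclgevf_desc *desc)
{
	int ret = hclgevf_cmd_send(hw, desc, 1);

	if (ret == -EOPNOTSUPP)		/* HCLGEVF_CMD_NOT_SUPPORTED */
		return 0;		/* optional feature: skip quietly */

	return ret;	/* -EPERM, -ETIME, -EIO, ... propagate unchanged */
}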
+
/* hclgevf_cmd_send - send command to command queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor for describing the command
@@ -190,6 +220,7 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
struct hclgevf_desc *desc_to_use;
bool complete = false;
u32 timeout = 0;
@@ -201,8 +232,17 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclgevf_ring_space(&hw->cmq.csq) ||
- test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ /* If the CMDQ ring is full, the SW HEAD and HW HEAD may differ, so
+ * update the SW HEAD pointer csq->next_to_clean from the hardware
+ */
+ csq->next_to_clean = hclgevf_read_dev(hw,
+ HCLGEVF_NIC_CSQ_HEAD_REG);
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
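Re-reading HCLGEVF_NIC_CSQ_HEAD_REG refreshes next_to_clean because, once the ring has looked full, only the hardware knows how many descriptors it has actually consumed. Under the usual convention that one slot is kept empty to tell full from empty, the free-space computation looks like this sketch (not necessarily the driver's exact macro):

static int demo_ring_space(int ntu, int ntc, int desc_num)
{
	int used = (ntu - ntc + desc_num) % desc_num;

	return desc_num - used - 1;	/* one slot stays empty */
}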
@@ -251,11 +291,7 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
else
retval = le16_to_cpu(desc[0].retval);
- if ((enum hclgevf_cmd_return_status)retval ==
- HCLGEVF_CMD_EXEC_SUCCESS)
- status = 0;
- else
- status = -EIO;
+ status = hclgevf_cmd_convert_err_code(retval);
hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
ntc++;
handle++;
@@ -265,14 +301,13 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
}
if (!complete)
- status = -EAGAIN;
+ status = -EBADE;
/* Clean the command send queue */
handle = hclgevf_cmd_csq_clean(hw);
- if (handle != num) {
+ if (handle != num)
dev_warn(&hdev->pdev->dev,
"cleaned %d, need to clean %d\n", handle, num);
- }
spin_unlock_bh(&hw->cmq.csq.lock);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 47030b42341f..127a434a56f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -46,9 +46,17 @@ struct hclgevf_cmq_ring {
enum hclgevf_cmd_return_status {
HCLGEVF_CMD_EXEC_SUCCESS = 0,
- HCLGEVF_CMD_NO_AUTH = 1,
- HCLGEVF_CMD_NOT_EXEC = 2,
- HCLGEVF_CMD_QUEUE_FULL = 3,
+ HCLGEVF_CMD_NO_AUTH = 1,
+ HCLGEVF_CMD_NOT_SUPPORTED = 2,
+ HCLGEVF_CMD_QUEUE_FULL = 3,
+ HCLGEVF_CMD_NEXT_ERR = 4,
+ HCLGEVF_CMD_UNEXE_ERR = 5,
+ HCLGEVF_CMD_PARA_ERR = 6,
+ HCLGEVF_CMD_RESULT_ERR = 7,
+ HCLGEVF_CMD_TIMEOUT = 8,
+ HCLGEVF_CMD_HILINK_ERR = 9,
+ HCLGEVF_CMD_QUEUE_ILLEGAL = 10,
+ HCLGEVF_CMD_INVALID = 11,
};
enum hclgevf_cmd_status {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5d53467ee2d2..a13a0e101c3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -11,6 +11,8 @@
#define HCLGEVF_NAME "hclgevf"
+#define HCLGEVF_RESET_MAX_FAIL_CNT 5
+
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
@@ -83,8 +85,7 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
HCLGEVF_TQP_INTR_GL2_REG,
HCLGEVF_TQP_INTR_RL_REG};
-static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
- struct hnae3_handle *handle)
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic);
@@ -232,7 +233,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
int status;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
- true, &resp_msg, sizeof(u8));
+ true, &resp_msg, sizeof(resp_msg));
if (status) {
dev_err(&hdev->pdev->dev,
"VF request to get TC info from PF failed %d",
@@ -321,7 +322,8 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
- 2, true, resp_data, 2);
+ sizeof(msg_data), true, resp_data,
+ sizeof(resp_data));
if (!ret)
qid_in_pf = *(u16 *)resp_data;
@@ -382,7 +384,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
struct hnae3_handle *nic = &hdev->nic;
struct hnae3_knic_private_info *kinfo;
u16 new_tqps = hdev->num_tqps;
- int i;
+ unsigned int i;
kinfo = &nic->kinfo;
kinfo->num_tc = 0;
@@ -418,7 +420,7 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
u8 resp_msg;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
- 0, false, &resp_msg, sizeof(u8));
+ 0, false, &resp_msg, sizeof(resp_msg));
if (status)
dev_err(&hdev->pdev->dev,
"VF failed to fetch link status(%d) from PF", status);
@@ -453,11 +455,13 @@ static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
u8 resp_msg;
send_msg = HCLGEVF_ADVERTISING;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
- sizeof(u8), false, &resp_msg, sizeof(u8));
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
+ &send_msg, sizeof(send_msg), false,
+ &resp_msg, sizeof(resp_msg));
send_msg = HCLGEVF_SUPPORTED;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
- sizeof(u8), false, &resp_msg, sizeof(u8));
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
+ &send_msg, sizeof(send_msg), false,
+ &resp_msg, sizeof(resp_msg));
}
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
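
A recurring change in this patch, here and in several hunks below, is replacing literal lengths and sizeof(u8) with sizeof applied to the buffer or variable itself (sizeof(msg_data), sizeof(resp_msg), sizeof(send_msg)). A short illustration of why that form is more robust; the variable names are only stand-ins:

#include <stdio.h>

int main(void)
{
	unsigned char msg_data[2 + 2];	/* e.g. queue id plus extra field */
	unsigned char resp_msg;

	/* sizeof(variable) tracks the declaration: if msg_data is ever
	 * widened, every length derived from it stays correct, whereas a
	 * literal 2 or sizeof(u8) silently goes stale.
	 */
	printf("msg_data: %zu bytes, resp_msg: %zu byte\n",
	       sizeof(msg_data), sizeof(resp_msg));
	return 0;
}
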
@@ -470,12 +474,6 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->numa_node_mask = hdev->numa_node_mask;
nic->flags |= HNAE3_SUPPORT_VF;
- if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
- dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
- hdev->ae_dev->dev_type);
- return -EINVAL;
- }
-
ret = hclgevf_knic_setup(hdev);
if (ret)
dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
@@ -544,14 +542,16 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
const u8 hfunc, const u8 *key)
{
struct hclgevf_rss_config_cmd *req;
+ unsigned int key_offset = 0;
struct hclgevf_desc desc;
- int key_offset;
+ int key_counts;
int key_size;
int ret;
+ key_counts = HCLGEVF_RSS_KEY_SIZE;
req = (struct hclgevf_rss_config_cmd *)desc.data;
- for (key_offset = 0; key_offset < 3; key_offset++) {
+ while (key_counts) {
hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_RSS_GENERIC_CONFIG,
false);
@@ -560,15 +560,12 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
req->hash_config |=
(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
- if (key_offset == 2)
- key_size =
- HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
- else
- key_size = HCLGEVF_RSS_HASH_KEY_NUM;
-
+ key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
memcpy(req->hash_key,
key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
+ key_counts -= key_size;
+ key_offset++;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
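
The rewritten key-download loop above drops the hard-coded three-descriptor split: a remaining-bytes counter and min() decide each chunk, so the code no longer assumes the key is exactly two full chunks plus a tail. A self-contained sketch of the same chunking pattern; the sizes are stand-ins and the printing stands in for building one firmware descriptor per piece:

#include <stdio.h>
#include <string.h>

#define CHUNK 16	/* stand-in for HCLGEVF_RSS_HASH_KEY_NUM */
#define KEY_SIZE 40	/* stand-in for HCLGEVF_RSS_KEY_SIZE */

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* Walk the key in CHUNK-byte pieces; the offset doubles as the
 * descriptor index and as the source position multiplier, exactly
 * like key_offset in the driver loop.
 */
static void send_key_in_chunks(const unsigned char *key, size_t remaining)
{
	size_t offset = 0;

	while (remaining) {
		unsigned char piece[CHUNK];
		size_t len = min_sz(CHUNK, remaining);

		memcpy(piece, key + offset * CHUNK, len);
		printf("descriptor %zu carries %zu bytes\n", offset, len);
		remaining -= len;
		offset++;
	}
}

int main(void)
{
	unsigned char key[KEY_SIZE] = { 0 };

	send_key_in_chunks(key, sizeof(key));	/* 16 + 16 + 8 */
	return 0;
}
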
@@ -631,7 +628,7 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
struct hclgevf_desc desc;
u16 roundup_size;
int status;
- int i;
+ unsigned int i;
req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
@@ -997,6 +994,8 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
u8 type;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+ type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
+ HCLGE_MBX_UNMAP_RING_TO_VECTOR;
for (node = ring_chain; node; node = node->next) {
int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
@@ -1006,9 +1005,6 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_MBX_VF_TO_PF,
false);
- type = en ?
- HCLGE_MBX_MAP_RING_TO_VECTOR :
- HCLGE_MBX_UNMAP_RING_TO_VECTOR;
req->msg[0] = type;
req->msg[1] = vector_id;
}
@@ -1134,7 +1130,7 @@ static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}
-static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
struct hclgevf_cfg_com_tqp_queue_cmd *req;
@@ -1147,7 +1143,8 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
- req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
+ if (enable)
+ req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -1193,7 +1190,7 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
HCLGE_MBX_MAC_VLAN_UC_MODIFY;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
- subcode, msg_data, ETH_ALEN * 2,
+ subcode, msg_data, sizeof(msg_data),
true, NULL, 0);
if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
@@ -1248,19 +1245,61 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+ int ret;
- if (vlan_id > 4095)
+ if (vlan_id > HCLGEVF_MAX_VLAN_ID)
return -EINVAL;
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
+	/* When the device is resetting, the firmware is unable to handle
+	 * the mailbox. Just record the vlan id, and remove it after the
+	 * reset has finished.
+	 */
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ return -EBUSY;
+ }
+
msg_data[0] = is_kill;
memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
memcpy(&msg_data[3], &proto, sizeof(proto));
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER, msg_data,
+ HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+
+	/* When removing the hw vlan filter fails, record the vlan id, and
+	 * try to remove it from hw later, to stay consistent with the
+	 * stack.
+	 */
+ if (is_kill && ret)
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+
+ return ret;
+}
+
+static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_SYNC_COUNT 60
+ struct hnae3_handle *handle = &hdev->nic;
+ int ret, sync_cnt = 0;
+ u16 vlan_id;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+ vlan_id, true);
+ if (ret)
+ return;
+
+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ sync_cnt++;
+ if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+ return;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ }
}
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
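
hclgevf_sync_vlan_filter() drains a bitmap of VLAN IDs whose hardware deletion previously failed, either because a reset was in flight or because the mailbox send itself errored, retrying at most 60 per service-task pass. The userspace sketch below models the deferred-retry bitmap with a plain bool array in place of the kernel bitops; hw_del_vlan() is a hypothetical stub:

#include <stdbool.h>
#include <stdio.h>

#define N_VID 4096
#define MAX_SYNC 60	/* retry budget per pass, as in the driver */

static bool pending[N_VID];	/* stands in for vlan_del_fail_bmap */

/* Hypothetical hw delete: one stubborn id keeps failing, to show the
 * early exit leaving it queued for a later pass.
 */
static int hw_del_vlan(int vid)
{
	return vid == 300 ? -1 : 0;
}

static void sync_vlan_filter(void)
{
	int done = 0;

	for (int vid = 0; vid < N_VID && done < MAX_SYNC; vid++) {
		if (!pending[vid])
			continue;
		if (hw_del_vlan(vid))
			return;	/* still failing: retry next pass */
		pending[vid] = false;
		done++;
	}
}

int main(void)
{
	pending[100] = pending[200] = pending[300] = true;
	sync_vlan_filter();
	printf("100:%d 200:%d 300:%d\n", pending[100], pending[200],
	       pending[300]);
	return 0;
}
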
@@ -1280,7 +1319,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
u8 msg_data[2];
int ret;
- memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+ memcpy(msg_data, &queue_id, sizeof(queue_id));
/* disable vf queue before send queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
@@ -1288,7 +1327,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
return ret;
return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
- 2, true, NULL, 0);
+ sizeof(msg_data), true, NULL, 0);
}
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -1306,6 +1345,10 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
struct hnae3_handle *handle = &hdev->nic;
int ret;
+ if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
+ !client)
+ return 0;
+
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
@@ -1410,6 +1453,8 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
+#define HCLGEVF_RESET_SYNC_TIME 100
+
int ret = 0;
switch (hdev->reset_type) {
@@ -1427,13 +1472,34 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
}
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGEVF_RESET_SYNC_TIME);
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ HCLGEVF_NIC_CMQ_ENABLE);
dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
hdev->reset_type, ret);
return ret;
}
+static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
+{
+ hdev->rst_stats.rst_fail_cnt++;
+ dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
+ hdev->rst_stats.rst_fail_cnt);
+
+ if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+
+ if (hclgevf_is_reset_pending(hdev)) {
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ hclgevf_reset_task_schedule(hdev);
+ } else {
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ HCLGEVF_NIC_CMQ_ENABLE);
+ }
+}
+
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
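
The new hclgevf_reset_err_handle() bounds retries with a failure counter: the same reset is re-queued only while rst_fail_cnt stays below HCLGEVF_RESET_MAX_FAIL_CNT, and a later successful reset (next hunk) zeroes the counter. A compact model of that bounded-retry policy; the struct is illustrative:

#include <stdio.h>

#define MAX_FAIL 5	/* mirrors HCLGEVF_RESET_MAX_FAIL_CNT */

struct rst_state {
	unsigned int fail_cnt;
	int pending;	/* would be a reset-type bitmask in the driver */
};

/* On failure: retry while under budget, otherwise stop and leave
 * recovery to a higher-level (PF-initiated) reset.
 */
static void reset_err_handle(struct rst_state *s)
{
	s->fail_cnt++;
	if (s->fail_cnt < MAX_FAIL)
		s->pending = 1;	/* schedule another attempt */
	else
		s->pending = 0;	/* give up; re-enable cmdq, wait for PF */
}

int main(void)
{
	struct rst_state s = { 0, 0 };

	for (int i = 0; i < 6; i++) {
		reset_err_handle(&s);
		printf("failure %u: retry=%d\n", s.fail_cnt, s.pending);
	}
	return 0;
}
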
@@ -1490,19 +1556,13 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++;
+ hdev->rst_stats.rst_fail_cnt = 0;
return ret;
err_reset_lock:
rtnl_unlock();
err_reset:
- /* When VF reset failed, only the higher level reset asserted by PF
- * can restore it, so re-initialize the command queue to receive
- * this higher reset event.
- */
- hclgevf_cmd_init(hdev);
- dev_err(&hdev->pdev->dev, "failed to reset VF\n");
- if (hclgevf_is_reset_pending(hdev))
- hclgevf_reset_task_schedule(hdev);
+ hclgevf_reset_err_handle(hdev);
return ret;
}
@@ -1612,7 +1672,8 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
+ !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
schedule_work(&hdev->rst_service_task);
}
@@ -1648,7 +1709,8 @@ static void hclgevf_service_timer(struct timer_list *t)
{
struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
- mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+ mod_timer(&hdev->service_timer, jiffies +
+ HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
hdev->stats_timer++;
hclgevf_task_schedule(hdev);
@@ -1668,9 +1730,9 @@ static void hclgevf_reset_service_task(struct work_struct *work)
if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) {
/* PF has intimated that it is about to reset the hardware.
- * We now have to poll & check if harware has actually completed
- * the reset sequence. On hardware reset completion, VF needs to
- * reset the client and ae device.
+ * We now have to poll & check if hardware has actually
+ * completed the reset sequence. On hardware reset completion,
+ * VF needs to reset the client and ae device.
*/
hdev->reset_attempts = 0;
@@ -1686,7 +1748,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of the below happens:
- * 1. reset was initiated due to watchdog timeout due to
+ * 1. reset was initiated due to watchdog timeout caused by
* a. IMP was earlier reset and our TX got choked down,
* which resulted in the watchdog reacting and inducing a VF
* reset. This also means our cmdq would be unreliable.
@@ -1748,7 +1810,8 @@ static void hclgevf_keep_alive_timer(struct timer_list *t)
struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
schedule_work(&hdev->keep_alive_task);
- mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+ mod_timer(&hdev->keep_alive_timer, jiffies +
+ HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
}
static void hclgevf_keep_alive_task(struct work_struct *work)
@@ -1763,7 +1826,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
- 0, false, &respmsg, sizeof(u8));
+ 0, false, &respmsg, sizeof(respmsg));
if (ret)
dev_err(&hdev->pdev->dev,
"VF sends keep alive cmd failed(=%d)\n", ret);
@@ -1789,6 +1852,8 @@ static void hclgevf_service_task(struct work_struct *work)
hclgevf_update_link_mode(hdev);
+ hclgevf_sync_vlan_filter(hdev);
+
hclgevf_deferred_task_schedule(hdev);
clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
@@ -1995,7 +2060,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
}
- /* Initialize RSS indirect table for each vport */
+ /* Initialize RSS indirect table */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
@@ -2008,9 +2073,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
- /* other vlan config(like, VLAN TX/RX offload) would also be added
- * here later
- */
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false);
}
@@ -2032,7 +2094,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- /* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
@@ -2056,7 +2117,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
if (hclgevf_reset_tqp(handle, i))
break;
- /* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);
}
@@ -2080,7 +2140,8 @@ static int hclgevf_client_start(struct hnae3_handle *handle)
if (ret)
return ret;
- mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+ mod_timer(&hdev->keep_alive_timer, jiffies +
+ HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
return 0;
}
@@ -2123,6 +2184,7 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
if (hdev->keep_alive_timer.function)
del_timer_sync(&hdev->keep_alive_timer);
@@ -2249,49 +2311,68 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev)
dev_info(dev, "VF info end.\n");
}
-static int hclgevf_init_client_instance(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
+static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
int ret;
- switch (client->type) {
- case HNAE3_CLIENT_KNIC:
- hdev->nic_client = client;
- hdev->nic.client = client;
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
- ret = client->ops->init_instance(&hdev->nic);
- if (ret)
- goto clear_nic;
+ set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+ hnae3_set_client_init_flag(client, ae_dev, 1);
- hnae3_set_client_init_flag(client, ae_dev, 1);
+ if (netif_msg_drv(&hdev->nic))
+ hclgevf_info_show(hdev);
- if (netif_msg_drv(&hdev->nic))
- hclgevf_info_show(hdev);
+ return 0;
+}
- if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
- struct hnae3_client *rc = hdev->roce_client;
+static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
- ret = hclgevf_init_roce_base_info(hdev);
- if (ret)
- goto clear_roce;
- ret = rc->ops->init_instance(&hdev->roce);
- if (ret)
- goto clear_roce;
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
- hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
- 1);
- }
- break;
- case HNAE3_CLIENT_UNIC:
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+}
+
+static int hclgevf_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
hdev->nic_client = client;
hdev->nic.client = client;
- ret = client->ops->init_instance(&hdev->nic);
+ ret = hclgevf_init_nic_client_instance(ae_dev, client);
if (ret)
goto clear_nic;
- hnae3_set_client_init_flag(client, ae_dev, 1);
+ ret = hclgevf_init_roce_client_instance(ae_dev,
+ hdev->roce_client);
+ if (ret)
+ goto clear_roce;
+
break;
case HNAE3_CLIENT_ROCE:
if (hnae3_dev_roce_supported(hdev)) {
@@ -2299,17 +2380,10 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hdev->roce.client = client;
}
- if (hdev->roce_client && hdev->nic_client) {
- ret = hclgevf_init_roce_base_info(hdev);
- if (ret)
- goto clear_roce;
-
- ret = client->ops->init_instance(&hdev->roce);
- if (ret)
- goto clear_roce;
- }
+ ret = hclgevf_init_roce_client_instance(ae_dev, client);
+ if (ret)
+ goto clear_roce;
- hnae3_set_client_init_flag(client, ae_dev, 1);
break;
default:
return -EINVAL;
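
The refactor above pulls roce bring-up into hclgevf_init_roce_client_instance(), which returns 0 early unless the device supports roce and both the nic and roce clients are registered; each registration path can then call it unconditionally, and whichever client arrives last completes the init. A toy model of that pattern follows; the explicit run-once flag is an addition for clarity, where the real driver relies on each client registering only once:

#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
	bool nic_client;
	bool roce_client;
	bool roce_inited;	/* explicit guard, for illustration only */
};

/* Safe to call from both registration paths: a no-op until every
 * prerequisite is present, then the init runs exactly once.
 */
static int init_roce_if_ready(struct toy_dev *d)
{
	if (!d->nic_client || !d->roce_client || d->roce_inited)
		return 0;
	d->roce_inited = true;
	printf("roce initialized\n");
	return 0;
}

int main(void)
{
	struct toy_dev d = { 0 };

	d.nic_client = true;
	init_roce_if_ready(&d);	/* too early: roce not registered */
	d.roce_client = true;
	init_roce_if_ready(&d);	/* both present now: init runs */
	return 0;
}
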
@@ -2342,6 +2416,8 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
/* un-init nic/unic, if this was not called by roce client */
if (client->ops->uninit_instance && hdev->nic_client &&
client->type != HNAE3_CLIENT_ROCE) {
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+
client->ops->uninit_instance(&hdev->nic, 0);
hdev->nic_client = NULL;
hdev->nic.client = NULL;
@@ -2512,6 +2588,12 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ if (pdev->revision >= 0x21) {
+ ret = hclgevf_set_promisc_mode(hdev, true);
+ if (ret)
+ return ret;
+ }
+
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -2591,9 +2673,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
* firmware makes sure broadcast packets can be accepted.
* For revision 0x21, default to enable broadcast promisc mode.
*/
- ret = hclgevf_set_promisc_mode(hdev, true);
- if (ret)
- goto err_config;
+ if (pdev->revision >= 0x21) {
+ ret = hclgevf_set_promisc_mode(hdev, true);
+ if (ret)
+ goto err_config;
+ }
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index cc52f54f8c08..5a9e30998a8f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -4,6 +4,7 @@
#ifndef __HCLGEVF_MAIN_H
#define __HCLGEVF_MAIN_H
#include <linux/fs.h>
+#include <linux/if_vlan.h>
#include <linux/types.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
@@ -12,9 +13,12 @@
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
+#define HCLGEVF_MAX_VLAN_ID 4095
#define HCLGEVF_MISC_VECTOR_NUM 0
#define HCLGEVF_INVALID_VPORT 0xffff
+#define HCLGEVF_GENERAL_TASK_INTERVAL 5
+#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2
/* This number actually depends upon the total number of VFs
* created by physical function. But the maximum number of
@@ -130,6 +134,8 @@ enum hclgevf_states {
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
HCLGEVF_STATE_IRQ_INITED,
+ HCLGEVF_STATE_REMOVING,
+ HCLGEVF_STATE_NIC_REGISTERED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
@@ -220,6 +226,7 @@ struct hclgevf_rst_stats {
u32 vf_rst_cnt; /* the number of VF resets */
u32 rst_done_cnt; /* the number of completed resets */
u32 hw_rst_done_cnt; /* the number of completed HW resets */
+	u32 rst_fail_cnt; /* the number of failed VF resets */
};
struct hclgevf_dev {
@@ -265,6 +272,8 @@ struct hclgevf_dev {
u16 *vector_status;
int *vector_irq;
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
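
The new vlan_del_fail_bmap member reserves one bit per possible VLAN ID; BITS_TO_LONGS(VLAN_N_VID) rounds the 4096 bits up to whole unsigned long words. A two-line illustration of that arithmetic, with a local macro written in the spirit of the kernel's (not copied from it):

#include <limits.h>
#include <stdio.h>

/* Round a bit count up to whole unsigned-long words. */
#define BITS_PER_LONG_	(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS_(n) (((n) + BITS_PER_LONG_ - 1) / BITS_PER_LONG_)

int main(void)
{
	/* 4096 VLAN bits -> 64 longs on a 64-bit machine. */
	printf("%zu\n", BITS_TO_LONGS_(4096));
	return 0;
}
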
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 30f2e9352cf3..f60b80bd605e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -102,7 +102,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
~HCLGE_MBX_NEED_RESP_BIT;
req->msg[0] = code;
req->msg[1] = subcode;
- memcpy(&req->msg[2], msg_data, msg_len);
+ if (msg_data)
+ memcpy(&req->msg[2], msg_data, msg_len);
/* synchronous send */
if (need_resp) {