Diffstat (limited to 'drivers/net/ethernet/mellanox')
 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 14
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8
 drivers/net/ethernet/mellanox/mlx4/en_port.c | 38
 drivers/net/ethernet/mellanox/mlx4/en_rx.c | 31
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
 drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 10
 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 59
 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 98
 drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 37
 drivers/net/ethernet/mellanox/mlx5/core/cq.c | 113
 drivers/net/ethernet/mellanox/mlx5/core/dev.c | 8
 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 3
 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 4
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 323
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 24
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 39
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 5
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 11
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 92
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 23
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 42
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 104
 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 1289
 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h | 76
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 207
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 72
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 141
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 8
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 49
 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 28
 drivers/net/ethernet/mellanox/mlx5/core/wq.c | 18
 drivers/net/ethernet/mellanox/mlx5/core/wq.h | 22
 drivers/net/ethernet/mellanox/mlxsw/Kconfig | 2
 drivers/net/ethernet/mellanox/mlxsw/Makefile | 2
 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 19
 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 9
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 9
 drivers/net/ethernet/mellanox/mlxsw/reg.h | 156
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 542
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 45
 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 52
 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 34
 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 5
 drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 174
 drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 15
 drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 189
 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 21
 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 206
 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 46
 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 804
 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 107
 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 16
 drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 8
 55 files changed, 4043 insertions(+), 1427 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ebc1f566a4d9..9a7a2f05ab35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -199,6 +199,10 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"rx_xdp_drop",
"rx_xdp_tx",
"rx_xdp_tx_full",
+
+ /* phy statistics */
+ "rx_packets_phy", "rx_bytes_phy",
+ "tx_packets_phy", "tx_bytes_phy",
};
static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
@@ -411,6 +415,10 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
+ for (i = 0; i < NUM_PHY_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((unsigned long *)&priv->phy_stats)[i];
+
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
data[index++] = priv->tx_ring[TX][i]->packets;
data[index++] = priv->tx_ring[TX][i]->bytes;
@@ -490,6 +498,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
+ for (i = 0; i < NUM_PHY_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[strings]);
+
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8fc51bc29003..e0adac4a9a19 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3256,6 +3256,10 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
last_i += NUM_XDP_STATS;
+
+ if (!mlx4_is_slave(dev))
+ bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS);
+ last_i += NUM_PHY_STATS;
}
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -3630,10 +3634,6 @@ int mlx4_en_reset_config(struct net_device *dev,
mlx4_en_stop_port(dev, 1);
}
- en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
- ts_config.rx_filter,
- !!(features & NETIF_F_HW_VLAN_CTAG_RX));
-
mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 1fa4849a6f56..0158b88bea5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -275,19 +275,31 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.xmit_more += READ_ONCE(ring->xmit_more);
}
- if (mlx4_is_master(mdev->dev)) {
- stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
- &mlx4_en_stats->RTOT_prio_1,
- NUM_PRIORITIES);
- stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
- &mlx4_en_stats->TTOT_prio_1,
- NUM_PRIORITIES);
- stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
- &mlx4_en_stats->ROCT_prio_1,
- NUM_PRIORITIES);
- stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
- &mlx4_en_stats->TOCT_prio_1,
- NUM_PRIORITIES);
+ if (!mlx4_is_slave(mdev->dev)) {
+ struct mlx4_en_phy_stats *p_stats = &priv->phy_stats;
+
+ p_stats->rx_packets_phy =
+ en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
+ &mlx4_en_stats->RTOT_prio_1,
+ NUM_PRIORITIES);
+ p_stats->tx_packets_phy =
+ en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
+ &mlx4_en_stats->TTOT_prio_1,
+ NUM_PRIORITIES);
+ p_stats->rx_bytes_phy =
+ en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
+ &mlx4_en_stats->ROCT_prio_1,
+ NUM_PRIORITIES);
+ p_stats->tx_bytes_phy =
+ en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
+ &mlx4_en_stats->TOCT_prio_1,
+ NUM_PRIORITIES);
+ if (mlx4_is_master(mdev->dev)) {
+ stats->rx_packets = p_stats->rx_packets_phy;
+ stats->tx_packets = p_stats->tx_packets_phy;
+ stats->rx_bytes = p_stats->rx_bytes_phy;
+ stats->tx_bytes = p_stats->tx_bytes_phy;
+ }
}
/* net device stats */
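
The four PHY counters above come from en_stats_adder(), which in this file sums one big-endian hardware counter per priority at a fixed stride. A runnable userspace sketch of that access pattern (the layout and values are illustrative):

#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sum `num` big-endian 64-bit counters, using the distance between the
 * prio_0 and prio_1 fields as the stride, as en_stats_adder() does. */
static uint64_t stats_adder(const uint64_t *start, const uint64_t *next, int num)
{
	ptrdiff_t stride = next - start;
	uint64_t sum = 0;
	int i;

	for (i = 0; i < num; i++, start += stride)
		sum += be64toh(*start);
	return sum;
}

int main(void)
{
	/* Two priorities, with an unrelated counter interleaved between them */
	uint64_t regs[4] = { htobe64(5), 0, htobe64(7), 0 };

	printf("%llu\n", (unsigned long long)stats_adder(&regs[0], &regs[2], 2));
	return 0;
}
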
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b4d144e67514..05787efef492 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -291,13 +291,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
- ring->rx_info = vzalloc_node(tmp, node);
+ ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
if (!ring->rx_info) {
- ring->rx_info = vzalloc(tmp);
- if (!ring->rx_info) {
- err = -ENOMEM;
- goto err_xdp_info;
- }
+ err = -ENOMEM;
+ goto err_xdp_info;
}
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
@@ -318,7 +315,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
return 0;
err_info:
- vfree(ring->rx_info);
+ kvfree(ring->rx_info);
ring->rx_info = NULL;
err_xdp_info:
xdp_rxq_info_unreg(&ring->xdp_rxq);
@@ -447,7 +444,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
bpf_prog_put(old_prog);
xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
- vfree(ring->rx_info);
+ kvfree(ring->rx_info);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
@@ -649,6 +646,12 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}
+#if IS_ENABLED(CONFIG_IPV6)
+#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6)
+#else
+#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
+#endif
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -662,12 +665,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int polled = 0;
int index;
- if (unlikely(!priv->port_up))
+ if (unlikely(!priv->port_up || budget <= 0))
return 0;
- if (unlikely(budget <= 0))
- return polled;
-
ring = priv->rx_ring[cq_ring];
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
@@ -838,12 +838,7 @@ xdp_drop_no_cnt:
ring->csum_ok++;
} else {
if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
- (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
-#if IS_ENABLED(CONFIG_IPV6)
- MLX4_CQE_STATUS_IPV6))))
-#else
- 0))))
-#endif
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
goto csum_none;
if (check_csum(cqe, skb, va, dev->features))
goto csum_none;
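
Two cleanups above are worth noting. The rx_info allocation now uses kvzalloc_node()/kvfree(), which try a contiguous kmalloc first and transparently fall back to vmalloc, subsuming the removed vzalloc_node()-then-vzalloc() fallback. And the checksum path folds its #ifdef into a single MLX4_CQE_STATUS_IP_ANY mask built once at preprocessing time. A userspace sketch of the mask idiom (the STATUS_* values are illustrative, not the hardware encoding):

#include <stdint.h>
#include <stdio.h>

#define STATUS_IPV4 0x0040
#define STATUS_IPV6 0x0080

#define CONFIG_IPV6 1 /* stand-in for the kernel's IS_ENABLED(CONFIG_IPV6) */

#if CONFIG_IPV6
#define STATUS_IP_ANY (STATUS_IPV4 | STATUS_IPV6)
#else
#define STATUS_IP_ANY (STATUS_IPV4)
#endif

int main(void)
{
	uint16_t cqe_status = STATUS_IPV6;

	/* The per-packet test stays a single branch either way */
	if (cqe_status & STATUS_IP_ANY)
		puts("CQE carries an IP packet eligible for checksum fixup");
	return 0;
}
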
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f470ae37d937..f7c81133594f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -608,6 +608,7 @@ struct mlx4_en_priv {
struct mlx4_en_flow_stats_tx tx_flowstats;
struct mlx4_en_port_stats port_stats;
struct mlx4_en_xdp_stats xdp_stats;
+ struct mlx4_en_phy_stats phy_stats;
struct mlx4_en_stats_bitmap stats_bitmap;
struct list_head mc_list;
struct list_head curr_list;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index aab28eb27a30..86b6051da8ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -63,6 +63,14 @@ struct mlx4_en_xdp_stats {
#define NUM_XDP_STATS 3
};
+struct mlx4_en_phy_stats {
+ unsigned long rx_packets_phy;
+ unsigned long rx_bytes_phy;
+ unsigned long tx_packets_phy;
+ unsigned long tx_bytes_phy;
+#define NUM_PHY_STATS 4
+};
+
#define NUM_MAIN_STATS 21
#define MLX4_NUM_PRIORITIES 8
@@ -116,7 +124,7 @@ enum {
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \
- NUM_XDP_STATS)
+ NUM_XDP_STATS + NUM_PHY_STATS)
#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
sizeof(((struct net_device_stats *)0)->n))
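
NUM_PHY_STATS is maintained by hand next to the struct it describes, following the convention of the other *_stats structs in this header. A hypothetical compile-time guard (not part of the patch; the kernel equivalent would be BUILD_BUG_ON) makes the invariant explicit:

#include <assert.h>

struct mlx4_en_phy_stats {
	unsigned long rx_packets_phy;
	unsigned long rx_bytes_phy;
	unsigned long tx_packets_phy;
	unsigned long tx_bytes_phy;
};
#define NUM_PHY_STATS 4

/* Fails to compile if a counter is added without bumping NUM_PHY_STATS;
 * the ethtool code walks exactly NUM_PHY_STATS unsigned longs. */
static_assert(sizeof(struct mlx4_en_phy_stats) ==
	      NUM_PHY_STATS * sizeof(unsigned long),
	      "NUM_PHY_STATS out of sync with struct mlx4_en_phy_stats");

int main(void) { return 0; }
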
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
index 53e69edaedde..9f1b1939716a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
@@ -37,24 +37,11 @@
#include "mlx5_core.h"
#include "fpga/ipsec.h"
-void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
- struct mlx5_accel_ipsec_sa *cmd)
-{
- if (!MLX5_IPSEC_DEV(mdev))
- return ERR_PTR(-EOPNOTSUPP);
-
- return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd);
-}
-
-int mlx5_accel_ipsec_sa_cmd_wait(void *ctx)
-{
- return mlx5_fpga_ipsec_sa_cmd_wait(ctx);
-}
-
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_ipsec_device_caps(mdev);
}
+EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
@@ -67,6 +54,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
}
+void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *xfrm,
+ const __be32 saddr[4],
+ const __be32 daddr[4],
+ const __be32 spi, bool is_ipv6)
+{
+ return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr,
+ spi, is_ipv6);
+}
+
+void mlx5_accel_esp_free_hw_context(void *context)
+{
+ mlx5_fpga_ipsec_delete_sa_ctx(context);
+}
+
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_ipsec_init(mdev);
@@ -76,3 +78,32 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_ipsec_cleanup(mdev);
}
+
+struct mlx5_accel_esp_xfrm *
+mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 flags)
+{
+ struct mlx5_accel_esp_xfrm *xfrm;
+
+ xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags);
+ if (IS_ERR(xfrm))
+ return xfrm;
+
+ xfrm->mdev = mdev;
+ return xfrm;
+}
+EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
+
+void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
+{
+ mlx5_fpga_esp_destroy_xfrm(xfrm);
+}
+EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
+
+int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ return mlx5_fpga_esp_modify_xfrm(xfrm, attrs);
+}
+EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
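
Taken together, the new entry points split SA programming into two objects: a software xfrm created from mlx5_accel_esp_xfrm_attrs, and a hardware context keyed by addresses and SPI. A hedged kernel-context sketch of the intended call order (error handling trimmed; this mirrors how en_accel/ipsec.c consumes the API further down):

/* Sketch only; assumes the signatures declared in accel/ipsec.h. */
static void *example_offload_sa(struct mlx5_core_dev *mdev,
				const struct mlx5_accel_esp_xfrm_attrs *attrs,
				const __be32 saddr[4], const __be32 daddr[4],
				__be32 spi, bool is_ipv6)
{
	struct mlx5_accel_esp_xfrm *xfrm;
	void *hw_ctx;

	xfrm = mlx5_accel_esp_create_xfrm(mdev, attrs, 0);
	if (IS_ERR(xfrm))
		return xfrm;

	hw_ctx = mlx5_accel_esp_create_hw_context(mdev, xfrm, saddr, daddr,
						  spi, is_ipv6);
	if (IS_ERR(hw_ctx)) {
		mlx5_accel_esp_destroy_xfrm(xfrm);
		return hw_ctx;
	}

	/* ... SA is live; teardown is the mirror image ... */
	mlx5_accel_esp_free_hw_context(hw_ctx);
	mlx5_accel_esp_destroy_xfrm(xfrm);
	return NULL;
}
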
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
index d6e20fea9554..024dbd22a89b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
@@ -34,89 +34,25 @@
#ifndef __MLX5_ACCEL_IPSEC_H__
#define __MLX5_ACCEL_IPSEC_H__
-#ifdef CONFIG_MLX5_ACCEL
-
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/accel.h>
-enum {
- MLX5_ACCEL_IPSEC_DEVICE = BIT(1),
- MLX5_ACCEL_IPSEC_IPV6 = BIT(2),
- MLX5_ACCEL_IPSEC_ESP = BIT(3),
- MLX5_ACCEL_IPSEC_LSO = BIT(4),
-};
-
-#define MLX5_IPSEC_SADB_IP_AH BIT(7)
-#define MLX5_IPSEC_SADB_IP_ESP BIT(6)
-#define MLX5_IPSEC_SADB_SA_VALID BIT(5)
-#define MLX5_IPSEC_SADB_SPI_EN BIT(4)
-#define MLX5_IPSEC_SADB_DIR_SX BIT(3)
-#define MLX5_IPSEC_SADB_IPV6 BIT(2)
-
-enum {
- MLX5_IPSEC_CMD_ADD_SA = 0,
- MLX5_IPSEC_CMD_DEL_SA = 1,
-};
-
-enum mlx5_accel_ipsec_enc_mode {
- MLX5_IPSEC_SADB_MODE_NONE = 0,
- MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1,
- MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3,
-};
+#ifdef CONFIG_MLX5_ACCEL
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
- MLX5_ACCEL_IPSEC_DEVICE)
-
-struct mlx5_accel_ipsec_sa {
- __be32 cmd;
- u8 key_enc[32];
- u8 key_auth[32];
- __be32 sip[4];
- __be32 dip[4];
- union {
- struct {
- __be32 reserved;
- u8 salt_iv[8];
- __be32 salt;
- } __packed gcm;
- struct {
- u8 salt[16];
- } __packed cbc;
- };
- __be32 spi;
- __be32 sw_sa_handle;
- __be16 tfclen;
- u8 enc_mode;
- u8 sip_masklen;
- u8 dip_masklen;
- u8 flags;
- u8 reserved[2];
-} __packed;
-
-/**
- * mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command
- * @mdev: mlx5 device
- * @cmd: command to execute
- * May be called from atomic context. Returns context pointer, or error
- * Caller must eventually call mlx5_accel_ipsec_sa_cmd_wait from non-atomic
- * context, to cleanup the context pointer
- */
-void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
- struct mlx5_accel_ipsec_sa *cmd);
-
-/**
- * mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion
- * @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec
- * Sleeps (killable) until command execution is complete.
- * Returns the command result, or -EINTR if killed
- */
-int mlx5_accel_ipsec_sa_cmd_wait(void *context);
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
+ MLX5_ACCEL_IPSEC_CAP_DEVICE)
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count);
+void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *xfrm,
+ const __be32 saddr[4],
+ const __be32 daddr[4],
+ const __be32 spi, bool is_ipv6);
+void mlx5_accel_esp_free_hw_context(void *context);
+
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
@@ -124,6 +60,20 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
#define MLX5_IPSEC_DEV(mdev) false
+static inline void *
+mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *xfrm,
+ const __be32 saddr[4],
+ const __be32 daddr[4],
+ const __be32 spi, bool is_ipv6)
+{
+ return NULL;
+}
+
+static inline void mlx5_accel_esp_free_hw_context(void *context)
+{
+}
+
static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 47239bf7bf43..323ffe8bf7e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,19 +71,24 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
}
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_buf *buf, int node)
+ struct mlx5_frag_buf *buf, int node)
{
dma_addr_t t;
buf->size = size;
buf->npages = 1;
buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
- &t, node);
- if (!buf->direct.buf)
+
+ buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
+ if (!buf->frags)
return -ENOMEM;
- buf->direct.map = t;
+ buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size,
+ &t, node);
+ if (!buf->frags->buf)
+ goto err_out;
+
+ buf->frags->map = t;
while (t & ((1 << buf->page_shift) - 1)) {
--buf->page_shift;
@@ -91,18 +96,24 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
}
return 0;
+err_out:
+ kfree(buf->frags);
+ return -ENOMEM;
}
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev,
+ int size, struct mlx5_frag_buf *buf)
{
return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
}
-EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
+EXPORT_SYMBOL(mlx5_buf_alloc);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
- dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
- buf->direct.map);
+ dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
+ buf->frags->map);
+
+ kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
@@ -147,6 +158,7 @@ err_free_buf:
err_out:
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
@@ -162,6 +174,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
}
kfree(buf->frags);
}
+EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
@@ -275,13 +288,13 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
+void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
u64 addr;
int i;
for (i = 0; i < buf->npages; i++) {
- addr = buf->direct.map + (i << buf->page_shift);
+ addr = buf->frags->map + (i << buf->page_shift);
pas[i] = cpu_to_be64(addr);
}
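
With mlx5_buf gone, a plain contiguous buffer is just a mlx5_frag_buf whose single frags[0] entry holds the whole allocation, which is why mlx5_fill_page_array() can derive every page address from one base mapping. The arithmetic in isolation (addresses are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t map = 0x100000;      /* frags->map: base DMA address */
	unsigned int page_shift = 12;
	int npages = 3;
	int i;

	/* Same computation as mlx5_fill_page_array() above */
	for (i = 0; i < npages; i++)
		printf("pas[%d] = %#llx\n", i,
		       (unsigned long long)(map + ((uint64_t)i << page_shift)));
	return 0;
}
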
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 1016e05c7ec7..a4179122a279 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -58,8 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
- if (refcount_dec_and_test(&mcq->refcount))
- complete(&mcq->free);
+ mlx5_cq_put(mcq);
if (time_after(jiffies, end))
break;
}
@@ -80,69 +79,19 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
* still arrive.
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
- refcount_inc(&cq->refcount);
+ mlx5_cq_hold(cq);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
-{
- struct mlx5_core_cq *cq;
- struct mlx5_cq_table *table = &dev->priv.cq_table;
-
- spin_lock(&table->lock);
- cq = radix_tree_lookup(&table->tree, cqn);
- if (likely(cq))
- refcount_inc(&cq->refcount);
- spin_unlock(&table->lock);
-
- if (!cq) {
- mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
- return;
- }
-
- ++cq->arm_sn;
-
- cq->comp(cq);
-
- if (refcount_dec_and_test(&cq->refcount))
- complete(&cq->free);
-}
-
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
-{
- struct mlx5_cq_table *table = &dev->priv.cq_table;
- struct mlx5_core_cq *cq;
-
- spin_lock(&table->lock);
-
- cq = radix_tree_lookup(&table->tree, cqn);
- if (cq)
- refcount_inc(&cq->refcount);
-
- spin_unlock(&table->lock);
-
- if (!cq) {
- mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
- return;
- }
-
- cq->event(cq, event_type);
-
- if (refcount_dec_and_test(&cq->refcount))
- complete(&cq->free);
-}
-
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen)
{
- struct mlx5_cq_table *table = &dev->priv.cq_table;
+ int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
+ u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
- u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
- int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
- c_eqn);
struct mlx5_eq *eq;
int err;
@@ -159,6 +108,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
+ cq->eq = eq;
refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
@@ -167,12 +117,16 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->tasklet_ctx.priv = &eq->tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
- spin_lock_irq(&table->lock);
- err = radix_tree_insert(&table->tree, cq->cqn, cq);
- spin_unlock_irq(&table->lock);
+ /* Add to comp EQ CQ tree to recv comp events */
+ err = mlx5_eq_add_cq(eq, cq);
if (err)
goto err_cmd;
+ /* Add to async EQ CQ tree to recv async events */
+ err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq);
+ if (err)
+ goto err_cq_add;
+
cq->pid = current->pid;
err = mlx5_debug_cq_add(dev, cq);
if (err)
@@ -183,6 +137,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0;
+err_cq_add:
+ mlx5_eq_del_cq(eq, cq);
err_cmd:
memset(din, 0, sizeof(din));
memset(dout, 0, sizeof(dout));
@@ -195,23 +151,17 @@ EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
- struct mlx5_cq_table *table = &dev->priv.cq_table;
u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
- struct mlx5_core_cq *tmp;
int err;
- spin_lock_irq(&table->lock);
- tmp = radix_tree_delete(&table->tree, cq->cqn);
- spin_unlock_irq(&table->lock);
- if (!tmp) {
- mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
- return -EINVAL;
- }
- if (tmp != cq) {
- mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn);
- return -EINVAL;
- }
+ err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq);
+ if (err)
+ return err;
+
+ err = mlx5_eq_del_cq(cq->eq, cq);
+ if (err)
+ return err;
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
@@ -222,8 +172,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
- if (refcount_dec_and_test(&cq->refcount))
- complete(&cq->free);
+ mlx5_cq_put(cq);
wait_for_completion(&cq->free);
return 0;
@@ -270,21 +219,3 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
-
-int mlx5_init_cq_table(struct mlx5_core_dev *dev)
-{
- struct mlx5_cq_table *table = &dev->priv.cq_table;
- int err;
-
- memset(table, 0, sizeof(*table));
- spin_lock_init(&table->lock);
- INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
- err = mlx5_cq_debugfs_init(dev);
-
- return err;
-}
-
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
-{
- mlx5_cq_debugfs_cleanup(dev);
-}
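
The open-coded refcount_dec_and_test()/complete() pairs are gone in favor of mlx5_cq_hold()/mlx5_cq_put(). Those helpers live in include/linux/mlx5/cq.h, outside this directory's diff; presumably they reduce to:

/* Presumed shape of the helpers referenced above (not shown in this diff). */
static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
{
	refcount_inc(&cq->refcount);
}

static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
{
	if (refcount_dec_and_test(&cq->refcount))
		complete(&cq->free);
}
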
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 17b723218b0c..b994b80d5714 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -337,6 +337,14 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
}
EXPORT_SYMBOL(mlx5_unregister_interface);
+void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
+{
+ mutex_lock(&mlx5_intf_mutex);
+ mlx5_remove_dev_by_protocol(mdev, protocol);
+ mlx5_add_dev_by_protocol(mdev, protocol);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
struct mlx5_priv *priv = &mdev->priv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index fd509160c8f6..d93ff567b40d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -246,6 +246,9 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
trace_seq_printf(p, "counter_id=%u\n", counter_id);
break;
+ case MLX5_FLOW_DESTINATION_TYPE_PORT:
+ trace_seq_printf(p, "port\n");
+ break;
}
trace_seq_putc(p, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 80eef4163f52..a6ba57fbb414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -163,9 +163,9 @@ TRACE_EVENT(mlx5_fs_set_fte,
fs_get_obj(__entry->fg, fte->node.parent);
__entry->group_index = __entry->fg->id;
__entry->index = fte->index;
- __entry->action = fte->action;
+ __entry->action = fte->action.action;
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
- __entry->flow_tag = fte->flow_tag;
+ __entry->flow_tag = fte->action.flow_tag;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index bac5103efad3..cf58c9637904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -38,17 +38,24 @@
#include <linux/module.h>
#include "en.h"
-#include "accel/ipsec.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
-struct mlx5e_ipsec_sa_entry {
- struct hlist_node hlist; /* Item in SADB_RX hashtable */
- unsigned int handle; /* Handle in SADB_RX */
- struct xfrm_state *x;
- struct mlx5e_ipsec *ipsec;
- void *context;
-};
+
+static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
+{
+ struct mlx5e_ipsec_sa_entry *sa;
+
+ if (!x)
+ return NULL;
+
+ sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
+ if (!sa)
+ return NULL;
+
+ WARN_ON(sa->x != x);
+ return sa;
+}
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
unsigned int handle)
@@ -74,18 +81,16 @@ static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
unsigned long flags;
int ret;
- spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
if (ret < 0)
- goto out;
+ return ret;
+ spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
sa_entry->handle = ret;
hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
- ret = 0;
-
-out:
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
- return ret;
+
+ return 0;
}
static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -101,87 +106,99 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
- unsigned long flags;
- /* Wait for the hash_del_rcu call in sadb_rx_del to affect data path */
- synchronize_rcu();
- spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
+ /* The xfrm layer already does an RCU sync between the del and free callbacks */
+
ida_simple_remove(&ipsec->halloc, sa_entry->handle);
- spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}
-static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x)
+static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
- unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;
-
- switch (key_len) {
- case 16:
- return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128;
- case 32:
- return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128;
- default:
- netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n",
- key_len, x->aead->alg_name);
- return -1;
+ struct xfrm_replay_state_esn *replay_esn;
+ u32 seq_bottom;
+ u8 overlap;
+ u32 *esn;
+
+ if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
+ sa_entry->esn_state.trigger = 0;
+ return false;
+ }
+
+ replay_esn = sa_entry->x->replay_esn;
+ seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
+ overlap = sa_entry->esn_state.overlap;
+
+ sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
+ htonl(seq_bottom));
+ esn = &sa_entry->esn_state.esn;
+
+ sa_entry->esn_state.trigger = 1;
+ if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
+ ++(*esn);
+ sa_entry->esn_state.overlap = 0;
+ return true;
+ } else if (unlikely(!overlap &&
+ (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
+ sa_entry->esn_state.overlap = 1;
+ return true;
}
+
+ return false;
}
-static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry,
- struct mlx5_accel_ipsec_sa *hw_sa)
+static void
+mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
+ struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
struct aead_geniv_ctx *geniv_ctx;
- unsigned int crypto_data_len;
struct crypto_aead *aead;
- unsigned int key_len;
+ unsigned int crypto_data_len, key_len;
int ivsize;
- memset(hw_sa, 0, sizeof(*hw_sa));
-
- if (op == MLX5_IPSEC_CMD_ADD_SA) {
- crypto_data_len = (x->aead->alg_key_len + 7) / 8;
- key_len = crypto_data_len - 4; /* 4 bytes salt at end */
- aead = x->data;
- geniv_ctx = crypto_aead_ctx(aead);
- ivsize = crypto_aead_ivsize(aead);
-
- memcpy(&hw_sa->key_enc, x->aead->alg_key, key_len);
- /* Duplicate 128 bit key twice according to HW layout */
- if (key_len == 16)
- memcpy(&hw_sa->key_enc[16], x->aead->alg_key, key_len);
- memcpy(&hw_sa->gcm.salt_iv, geniv_ctx->salt, ivsize);
- hw_sa->gcm.salt = *((__be32 *)(x->aead->alg_key + key_len));
- }
+ memset(attrs, 0, sizeof(*attrs));
- hw_sa->cmd = htonl(op);
- hw_sa->flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN;
- if (x->props.family == AF_INET) {
- hw_sa->sip[3] = x->props.saddr.a4;
- hw_sa->dip[3] = x->id.daddr.a4;
- hw_sa->sip_masklen = 32;
- hw_sa->dip_masklen = 32;
- } else {
- memcpy(hw_sa->sip, x->props.saddr.a6, sizeof(hw_sa->sip));
- memcpy(hw_sa->dip, x->id.daddr.a6, sizeof(hw_sa->dip));
- hw_sa->sip_masklen = 128;
- hw_sa->dip_masklen = 128;
- hw_sa->flags |= MLX5_IPSEC_SADB_IPV6;
- }
- hw_sa->spi = x->id.spi;
- hw_sa->sw_sa_handle = htonl(sa_entry->handle);
- switch (x->id.proto) {
- case IPPROTO_ESP:
- hw_sa->flags |= MLX5_IPSEC_SADB_IP_ESP;
- break;
- case IPPROTO_AH:
- hw_sa->flags |= MLX5_IPSEC_SADB_IP_AH;
- break;
- default:
- break;
+ /* key */
+ crypto_data_len = (x->aead->alg_key_len + 7) / 8;
+ key_len = crypto_data_len - 4; /* 4 bytes salt at end */
+
+ memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
+ aes_gcm->key_len = key_len * 8;
+
+ /* salt and seq_iv */
+ aead = x->data;
+ geniv_ctx = crypto_aead_ctx(aead);
+ ivsize = crypto_aead_ivsize(aead);
+ memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
+ memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
+ sizeof(aes_gcm->salt));
+
+ /* iv len */
+ aes_gcm->icv_len = x->aead->alg_icv_len;
+
+ /* esn */
+ if (sa_entry->esn_state.trigger) {
+ attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
+ attrs->esn = sa_entry->esn_state.esn;
+ if (sa_entry->esn_state.overlap)
+ attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
}
- hw_sa->enc_mode = mlx5e_ipsec_enc_mode(x);
- if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND))
- hw_sa->flags |= MLX5_IPSEC_SADB_DIR_SX;
+
+ /* rx handle */
+ attrs->sa_handle = sa_entry->handle;
+
+ /* algo type */
+ attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
+
+ /* action */
+ attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
+ MLX5_ACCEL_ESP_ACTION_ENCRYPT :
+ MLX5_ACCEL_ESP_ACTION_DECRYPT;
+ /* flags */
+ attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
+ MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
+ MLX5_ACCEL_ESP_FLAGS_TUNNEL;
}
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -203,7 +220,9 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Cannot offload compressed xfrm states\n");
return -EINVAL;
}
- if (x->props.flags & XFRM_STATE_ESN) {
+ if (x->props.flags & XFRM_STATE_ESN &&
+ !(mlx5_accel_ipsec_device_caps(priv->mdev) &
+ MLX5_ACCEL_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -251,7 +270,8 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.family == AF_INET6 &&
- !(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_IPV6)) {
+ !(mlx5_accel_ipsec_device_caps(priv->mdev) &
+ MLX5_ACCEL_IPSEC_CAP_IPV6)) {
netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
return -EINVAL;
}
@@ -262,9 +282,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.dev;
- struct mlx5_accel_ipsec_sa hw_sa;
+ struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv;
- void *context;
+ __be32 saddr[4] = {0}, daddr[4] = {0}, spi;
+ bool is_ipv6 = false;
int err;
priv = netdev_priv(netdev);
@@ -291,22 +312,49 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
goto err_entry;
}
+ } else {
+ sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
+ mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
}
- mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa);
- context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
- if (IS_ERR(context)) {
- err = PTR_ERR(context);
+ /* check esn */
+ mlx5e_ipsec_update_esn_state(sa_entry);
+
+ /* create xfrm */
+ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
+ sa_entry->xfrm =
+ mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
+ MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
+ if (IS_ERR(sa_entry->xfrm)) {
+ err = PTR_ERR(sa_entry->xfrm);
goto err_sadb_rx;
}
- err = mlx5_accel_ipsec_sa_cmd_wait(context);
- if (err)
- goto err_sadb_rx;
+ /* create hw context */
+ if (x->props.family == AF_INET) {
+ saddr[3] = x->props.saddr.a4;
+ daddr[3] = x->id.daddr.a4;
+ } else {
+ memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
+ memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
+ is_ipv6 = true;
+ }
+ spi = x->id.spi;
+ sa_entry->hw_context =
+ mlx5_accel_esp_create_hw_context(priv->mdev,
+ sa_entry->xfrm,
+ saddr, daddr, spi,
+ is_ipv6);
+ if (IS_ERR(sa_entry->hw_context)) {
+ err = PTR_ERR(sa_entry->hw_context);
+ goto err_xfrm;
+ }
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
+err_xfrm:
+ mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sadb_rx:
if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
mlx5e_ipsec_sadb_rx_del(sa_entry);
@@ -320,43 +368,26 @@ out:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
- struct mlx5e_ipsec_sa_entry *sa_entry;
- struct mlx5_accel_ipsec_sa hw_sa;
- void *context;
+ struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- if (!x->xso.offload_handle)
+ if (!sa_entry)
return;
- sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
- WARN_ON(sa_entry->x != x);
-
if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
mlx5e_ipsec_sadb_rx_del(sa_entry);
-
- mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa);
- context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
- if (IS_ERR(context))
- return;
-
- sa_entry->context = context;
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
- struct mlx5e_ipsec_sa_entry *sa_entry;
- int res;
+ struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- if (!x->xso.offload_handle)
+ if (!sa_entry)
return;
- sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
- WARN_ON(sa_entry->x != x);
-
- res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context);
- sa_entry->context = NULL;
- if (res) {
- /* Leftover object will leak */
- return;
+ if (sa_entry->hw_context) {
+ flush_workqueue(sa_entry->ipsec->wq);
+ mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
+ mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
}
if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
@@ -383,6 +414,14 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
ida_init(&ipsec->halloc);
ipsec->en_priv = priv;
ipsec->en_priv->ipsec = ipsec;
+ ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
+ MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
+ ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
+ priv->netdev->name);
+ if (!ipsec->wq) {
+ kfree(ipsec);
+ return -ENOMEM;
+ }
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return 0;
}
@@ -394,6 +433,9 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
+ drain_workqueue(ipsec->wq);
+ destroy_workqueue(ipsec->wq);
+
ida_destroy(&ipsec->halloc);
kfree(ipsec);
priv->ipsec = NULL;
@@ -414,11 +456,58 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return true;
}
+struct mlx5e_ipsec_modify_state_work {
+ struct work_struct work;
+ struct mlx5_accel_esp_xfrm_attrs attrs;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+};
+
+static void _update_xfrm_state(struct work_struct *work)
+{
+ int ret;
+ struct mlx5e_ipsec_modify_state_work *modify_work =
+ container_of(work, struct mlx5e_ipsec_modify_state_work, work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry;
+
+ ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm,
+ &modify_work->attrs);
+ if (ret)
+ netdev_warn(sa_entry->ipsec->en_priv->netdev,
+ "Not an IPSec offload device\n");
+
+ kfree(modify_work);
+}
+
+static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ struct mlx5e_ipsec_modify_state_work *modify_work;
+ bool need_update;
+
+ if (!sa_entry)
+ return;
+
+ need_update = mlx5e_ipsec_update_esn_state(sa_entry);
+ if (!need_update)
+ return;
+
+ modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC);
+ if (!modify_work)
+ return;
+
+ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
+ modify_work->sa_entry = sa_entry;
+
+ INIT_WORK(&modify_work->work, _update_xfrm_state);
+ WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
+}
+
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
@@ -429,7 +518,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
if (!priv->ipsec)
return;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_ESP) ||
+ if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
return;
@@ -448,7 +537,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_LSO) ||
+ if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
!MLX5_CAP_ETH(mdev, swp_lso)) {
mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
return;
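
The ESN bookkeeping above works in half-windows of the 32-bit sequence space: `overlap` records which half the bottom of the replay window last sat in, and the high 32 bits (`esn`) are bumped exactly once per wraparound. A runnable userspace sketch of that decision (a simplification of mlx5e_ipsec_update_esn_state() above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESN_SCOPE_MID 0x80000000u

static bool esn_needs_update(uint32_t seq_bottom, bool *overlap, uint32_t *esn)
{
	if (*overlap && seq_bottom < ESN_SCOPE_MID) {
		(*esn)++;            /* wrapped back into the low half: bump ESN */
		*overlap = false;
		return true;
	}
	if (!*overlap && seq_bottom >= ESN_SCOPE_MID) {
		*overlap = true;     /* crossed into the high half */
		return true;
	}
	return false;                /* still in the same half: nothing to push */
}

int main(void)
{
	bool overlap = false;
	uint32_t esn = 0;

	printf("%d\n", esn_needs_update(0x90000000u, &overlap, &esn)); /* 1 */
	printf("%d\n", esn_needs_update(0x00001000u, &overlap, &esn)); /* 1 */
	printf("esn=%u overlap=%d\n", esn, overlap);                   /* esn=1 */
	return 0;
}
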
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 56e00baf16cc..1198fc1eba4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -40,7 +40,11 @@
#include <net/xfrm.h>
#include <linux/idr.h>
+#include "accel/ipsec.h"
+
#define MLX5E_IPSEC_SADB_RX_BITS 10
+#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
+
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
@@ -77,10 +81,30 @@ struct mlx5e_ipsec_stats {
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
+ bool no_trailer;
spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
struct ida halloc;
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_stats stats;
+ struct workqueue_struct *wq;
+};
+
+struct mlx5e_ipsec_esn_state {
+ u32 esn;
+ u8 trigger: 1;
+ u8 overlap: 1;
+};
+
+struct mlx5e_ipsec_sa_entry {
+ struct hlist_node hlist; /* Item in SADB_RX hashtable */
+ struct mlx5e_ipsec_esn_state esn_state;
+ unsigned int handle; /* Handle in SADB_RX */
+ struct xfrm_state *x;
+ struct mlx5e_ipsec *ipsec;
+ struct mlx5_accel_esp_xfrm *xfrm;
+ void *hw_context;
+ void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
+ struct xfrm_offload *xo);
};
void mlx5e_ipsec_build_inverse_table(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 6a7c8b04447e..c245d8e78509 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -42,10 +42,11 @@
enum {
MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
+ MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
};
struct mlx5e_ipsec_rx_metadata {
- unsigned char reserved;
+ unsigned char nexthdr;
__be32 sa_handle;
} __packed;
@@ -175,7 +176,30 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
}
}
-static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo)
+void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
+ struct xfrm_offload *xo)
+{
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ __u32 oseq = replay_esn->oseq;
+ int iv_offset;
+ __be64 seqno;
+ u32 seq_hi;
+
+ if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
+ MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
+ seq_hi = xo->seq.hi - 1;
+ } else {
+ seq_hi = xo->seq.hi;
+ }
+
+ /* Place the SN in the IV field */
+ seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
+ iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
+ skb_store_bits(skb, iv_offset, &seqno, 8);
+}
+
+void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
+ struct xfrm_offload *xo)
{
int iv_offset;
__be64 seqno;
@@ -227,6 +251,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_metadata *mdata;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
struct xfrm_state *x;
if (!xo)
@@ -261,7 +286,8 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
- mlx5e_ipsec_set_iv(skb, xo);
+ sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
+ sa_entry->set_iv_op(skb, x, xo);
mlx5e_ipsec_set_metadata(skb, mdata, xo);
return skb;
@@ -301,10 +327,17 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
switch (mdata->syndrome) {
case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
+ if (likely(priv->ipsec->no_trailer)) {
+ xo->flags |= XFRM_ESP_NO_TRAILER;
+ xo->proto = mdata->content.rx.nexthdr;
+ }
break;
case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
break;
+ case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
+ xo->status = CRYPTO_INVALID_PROTOCOL;
+ break;
default:
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
return NULL;
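
Both IV helpers above finish the same way: the full 64-bit sequence number is stored into the 8-byte IV slot that follows the ESP header, with the ESN variant first choosing the right high half around a GSO wraparound. The composition step in isolation:

#include <stdint.h>
#include <stdio.h>

/* Fold the 32-bit high (ESN) and low halves into the value written by
 * mlx5e_ipsec_set_iv() / mlx5e_ipsec_set_iv_esn(). */
static uint64_t esp_iv_seq(uint32_t seq_hi, uint32_t seq_low)
{
	return ((uint64_t)seq_hi << 32) + seq_low;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)esp_iv_seq(1, 0x10)); /* 0x100000010 */
	return 0;
}
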
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index e37ae2598dbb..2bfbbef1b054 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -37,6 +37,7 @@
#ifdef CONFIG_MLX5_EN_IPSEC
#include <linux/skbuff.h>
+#include <net/xfrm.h>
#include "en.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
@@ -46,6 +47,10 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void);
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
netdev_features_t features);
+void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
+ struct xfrm_offload *xo);
+void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
+ struct xfrm_offload *xo);
struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe *wqe,
struct sk_buff *skb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 363d8dcb7f17..ea4b255380a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1156,6 +1156,15 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
kfree(ppriv); /* mlx5e_rep_priv */
}
+static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_priv *rpriv;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+
+ return rpriv->netdev;
+}
+
static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1168,6 +1177,7 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
rep_if.load = mlx5e_vport_rep_load;
rep_if.unload = mlx5e_vport_rep_unload;
+ rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}
}
@@ -1195,6 +1205,7 @@ void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
rep_if.load = mlx5e_nic_rep_load;
rep_if.unload = mlx5e_nic_rep_unload;
+ rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
rep_if.priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e5c3ab46a24a..8cce90dc461d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -53,7 +53,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
void *data)
{
- u32 ci = cqcc & cq->wq.sz_m1;
+ u32 ci = cqcc & cq->wq.fbc.sz_m1;
memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
}
@@ -75,9 +75,10 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
{
- u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
- u32 wq_sz = 1 << cq->wq.log_sz;
- u32 ci = cqcc & cq->wq.sz_m1;
+ struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc;
+ u8 op_own = (cqcc >> fbc->log_sz) & 1;
+ u32 wq_sz = 1 << fbc->log_sz;
+ u32 ci = cqcc & fbc->sz_m1;
u32 ci_top = min_t(u32, wq_sz, ci + n);
for (; ci < ci_top; ci++, n--) {
@@ -102,7 +103,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
cq->title.op_own &= 0xf0;
- cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
+ cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz);
cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fa86a1466718..7c33df2034f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -675,6 +675,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
+ .has_flow_tag = true,
.flow_tag = attr->flow_tag,
.encap_id = 0,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 25106e996a96..c1c94974e16b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -393,6 +393,51 @@ static void general_event_handler(struct mlx5_core_dev *dev,
}
}
+/* caller must eventually call mlx5_cq_put on the returned cq */
+static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
+{
+ struct mlx5_cq_table *table = &eq->cq_table;
+ struct mlx5_core_cq *cq = NULL;
+
+ spin_lock(&table->lock);
+ cq = radix_tree_lookup(&table->tree, cqn);
+ if (likely(cq))
+ mlx5_cq_hold(cq);
+ spin_unlock(&table->lock);
+
+ return cq;
+}
+
+static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
+{
+ struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+ if (unlikely(!cq)) {
+ mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
+ return;
+ }
+
+ ++cq->arm_sn;
+
+ cq->comp(cq);
+
+ mlx5_cq_put(cq);
+}
+
+static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
+{
+ struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+ if (unlikely(!cq)) {
+ mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
+ return;
+ }
+
+ cq->event(cq, event_type);
+
+ mlx5_cq_put(cq);
+}
+
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
struct mlx5_eq *eq = eq_ptr;
@@ -415,7 +460,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
switch (eqe->type) {
case MLX5_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
- mlx5_cq_completion(dev, cqn);
+ mlx5_eq_cq_completion(eq, cqn);
break;
case MLX5_EVENT_TYPE_DCT_DRAINED:
rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
@@ -472,7 +517,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
cqn, eqe->data.cq_err.syndrome);
- mlx5_cq_event(dev, cqn, eqe->type);
+ mlx5_eq_cq_event(eq, cqn, eqe->type);
break;
case MLX5_EVENT_TYPE_PAGE_REQUEST:
@@ -567,6 +612,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name,
enum mlx5_eq_type type)
{
+ struct mlx5_cq_table *cq_table = &eq->cq_table;
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
irq_handler_t handler;
@@ -576,6 +622,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
u32 *in;
int err;
+ /* Init CQ table */
+ memset(cq_table, 0, sizeof(*cq_table));
+ spin_lock_init(&cq_table->lock);
+ INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+
eq->type = type;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
@@ -669,7 +720,6 @@ err_buf:
mlx5_buf_free(dev, &eq->buf);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
@@ -696,7 +746,40 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
+
+int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
+{
+ struct mlx5_cq_table *table = &eq->cq_table;
+ int err;
+
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree, cq->cqn, cq);
+ spin_unlock_irq(&table->lock);
+
+ return err;
+}
+
+int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
+{
+ struct mlx5_cq_table *table = &eq->cq_table;
+ struct mlx5_core_cq *tmp;
+
+ spin_lock_irq(&table->lock);
+ tmp = radix_tree_delete(&table->tree, cq->cqn);
+ spin_unlock_irq(&table->lock);
+
+ if (!tmp) {
+ mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
+ return -ENOENT;
+ }
+
+ if (tmp != cq) {
+ mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
@@ -840,4 +923,3 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
-EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c2b1d7d351fc..77b7272eaaa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1619,10 +1619,14 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
esw->mode = mode;
- if (mode == SRIOV_LEGACY)
+ if (mode == SRIOV_LEGACY) {
err = esw_create_legacy_fdb_table(esw, nvfs + 1);
- else
+ } else {
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+
err = esw_offloads_init(esw, nvfs + 1);
+ }
+
if (err)
goto abort;
@@ -1644,12 +1648,17 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
abort:
esw->mode = SRIOV_NONE;
+
+ if (mode == SRIOV_OFFLOADS)
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+
return err;
}
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
+ int old_mode;
int nvports;
int i;
@@ -1675,7 +1684,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
else if (esw->mode == SRIOV_OFFLOADS)
esw_offloads_cleanup(esw, nvports);
+ old_mode = esw->mode;
esw->mode = SRIOV_NONE;
+
+ if (old_mode == SRIOV_OFFLOADS)
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
@@ -2175,3 +2188,9 @@ free_out:
kvfree(out);
return err;
}
+
+u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
+{
+ return esw->mode;
+}
+EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2fa037066b2f..98d2177d0806 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -37,19 +37,9 @@
#include <linux/if_link.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
+#include <linux/mlx5/eswitch.h>
#include "lib/mpfs.h"
-enum {
- SRIOV_NONE,
- SRIOV_LEGACY,
- SRIOV_OFFLOADS
-};
-
-enum {
- REP_ETH,
- NUM_REP_TYPES,
-};
-
#ifdef CONFIG_MLX5_ESWITCH
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -139,29 +129,13 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_table *fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
- struct mlx5_flow_handle *miss_rule;
+ struct mlx5_flow_handle *miss_rule_uni;
+ struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
} offloads;
};
};
-struct mlx5_eswitch_rep;
-struct mlx5_eswitch_rep_if {
- int (*load)(struct mlx5_core_dev *dev,
- struct mlx5_eswitch_rep *rep);
- void (*unload)(struct mlx5_eswitch_rep *rep);
- void *priv;
- bool valid;
-};
-
-struct mlx5_eswitch_rep {
- struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
- u16 vport;
- u8 hw_id[ETH_ALEN];
- u16 vlan;
- u32 vlan_refcount;
-};
-
struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
@@ -231,9 +205,6 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats);
-struct mlx5_flow_handle *
-mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
- u32 sqn);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
struct mlx5_flow_spec;
@@ -278,13 +249,6 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
-void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
- int vport_index,
- struct mlx5_eswitch_rep_if *rep_if,
- u8 rep_type);
-void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index,
- u8 rep_type);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 99f583a15cc3..0a8303c1b52f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -338,6 +338,7 @@ out:
kvfree(spec);
return flow_rule;
}
+EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
@@ -350,7 +351,11 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
+ void *headers_c;
+ void *headers_v;
int err = 0;
+ u8 *dmac_c;
+ u8 *dmac_v;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -358,6 +363,13 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
goto out;
}
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
+ outer_headers.dmac_47_16);
+ dmac_c[0] = 0x01;
+
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = 0;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@@ -366,11 +378,28 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
- esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
+ esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
goto out;
}
- esw->fdb_table.offloads.miss_rule = flow_rule;
+ esw->fdb_table.offloads.miss_rule_uni = flow_rule;
+
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+ dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
+ outer_headers.dmac_47_16);
+ dmac_v[0] = 0x01;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+ &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
+ goto out;
+ }
+
+ esw->fdb_table.offloads.miss_rule_multi = flow_rule;
+
out:
kvfree(spec);
return err;
@@ -426,6 +455,7 @@ static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
}
#define MAX_PF_SQ 256
+#define MAX_SQ_NVPORTS 32
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
@@ -438,6 +468,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
struct mlx5_flow_group *g;
void *match_criteria;
u32 *flow_group_in;
+ u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
@@ -455,7 +486,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
if (err)
goto fast_fdb_err;
- table_size = nvports + MAX_PF_SQ + 1;
+ table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;
ft_attr.max_fte = table_size;
ft_attr.prio = FDB_SLOW_PATH;
@@ -478,7 +509,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
- ix = nvports + MAX_PF_SQ;
+ ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
@@ -492,10 +523,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
/* create miss group */
memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
@@ -531,7 +568,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
- mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
@@ -789,14 +827,9 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
- /* disable PF RoCE so missed packets don't go through RoCE steering */
- mlx5_dev_list_lock();
- mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
-
err = esw_create_offloads_fdb_tables(esw, nvports);
if (err)
- goto create_fdb_err;
+ return err;
err = esw_create_offloads_table(esw);
if (err)
@@ -821,12 +854,6 @@ create_fg_err:
create_ft_err:
esw_destroy_offloads_fdb_tables(esw);
-create_fdb_err:
- /* enable back PF RoCE */
- mlx5_dev_list_lock();
- mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
-
return err;
}
@@ -844,9 +871,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
}
/* enable back PF RoCE */
- mlx5_dev_list_lock();
- mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
return err;
}
@@ -1160,10 +1185,12 @@ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
rep_if->load = __rep_if->load;
rep_if->unload = __rep_if->unload;
+ rep_if->get_proto_dev = __rep_if->get_proto_dev;
rep_if->priv = __rep_if->priv;
rep_if->valid = true;
}
+EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
int vport_index, u8 rep_type)
@@ -1178,6 +1205,7 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
rep->rep_if[rep_type].valid = false;
}
+EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
@@ -1188,3 +1216,35 @@ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
rep = &offloads->vport_reps[UPLINK_REP_INDEX];
return rep->rep_if[rep_type].priv;
}
+
+void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
+ int vport,
+ u8 rep_type)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ if (vport == FDB_UPLINK_VPORT)
+ vport = UPLINK_REP_INDEX;
+
+ rep = &offloads->vport_reps[vport];
+
+ if (rep->rep_if[rep_type].valid &&
+ rep->rep_if[rep_type].get_proto_dev)
+ return rep->rep_if[rep_type].get_proto_dev(rep);
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
+
+void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
+{
+ return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
+}
+EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
+
+struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
+ int vport)
+{
+ return &esw->offloads.vport_reps[vport];
+}
+EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
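The exports above complete the representor API surface for protocol drivers. Below is a sketch of registration with the new get_proto_dev hook; the three callbacks are hypothetical placeholders, everything else comes from this patch.

static int sketch_rep_load(struct mlx5_core_dev *dev,
			   struct mlx5_eswitch_rep *rep);
static void sketch_rep_unload(struct mlx5_eswitch_rep *rep);
static void *sketch_rep_get_netdev(struct mlx5_eswitch_rep *rep);

static void sketch_register_reps(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep_if rep_if = {
		.load          = sketch_rep_load,
		.unload        = sketch_rep_unload,
		.get_proto_dev = sketch_rep_get_netdev,
		.priv          = NULL,
	};
	int vport;

	/* one rep_if per vport; peers can later resolve the protocol
	 * device through mlx5_eswitch_get_proto_dev()
	 */
	for (vport = 0; vport < nvports; vport++)
		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}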
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 35d0e33381ca..4f1568528738 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -31,49 +31,91 @@
*
*/
+#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs_helpers.h>
+#include <linux/mlx5/fs.h>
+#include <linux/rbtree.h>
#include "mlx5_core.h"
+#include "fs_cmd.h"
#include "fpga/ipsec.h"
#include "fpga/sdk.h"
#include "fpga/core.h"
#define SBU_QP_QUEUE_SIZE 8
+#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000)
-enum mlx5_ipsec_response_syndrome {
- MLX5_IPSEC_RESPONSE_SUCCESS = 0,
- MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
- MLX5_IPSEC_RESPONSE_SADB_ISSUE = 2,
- MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
+enum mlx5_fpga_ipsec_cmd_status {
+ MLX5_FPGA_IPSEC_CMD_PENDING,
+ MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
+ MLX5_FPGA_IPSEC_CMD_COMPLETE,
};
-enum mlx5_fpga_ipsec_sacmd_status {
- MLX5_FPGA_IPSEC_SACMD_PENDING,
- MLX5_FPGA_IPSEC_SACMD_SEND_FAIL,
- MLX5_FPGA_IPSEC_SACMD_COMPLETE,
-};
-
-struct mlx5_ipsec_command_context {
+struct mlx5_fpga_ipsec_cmd_context {
struct mlx5_fpga_dma_buf buf;
- struct mlx5_accel_ipsec_sa sa;
- enum mlx5_fpga_ipsec_sacmd_status status;
+ enum mlx5_fpga_ipsec_cmd_status status;
+ struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
int status_code;
struct completion complete;
struct mlx5_fpga_device *dev;
struct list_head list; /* Item in pending_cmds */
+ u8 command[0];
+};
+
+struct mlx5_fpga_esp_xfrm;
+
+struct mlx5_fpga_ipsec_sa_ctx {
+ struct rhash_head hash;
+ struct mlx5_ifc_fpga_ipsec_sa hw_sa;
+ struct mlx5_core_dev *dev;
+ struct mlx5_fpga_esp_xfrm *fpga_xfrm;
+};
+
+struct mlx5_fpga_esp_xfrm {
+ unsigned int num_rules;
+ struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
+ struct mutex lock; /* xfrm lock */
+ struct mlx5_accel_esp_xfrm accel_xfrm;
+};
+
+struct mlx5_fpga_ipsec_rule {
+ struct rb_node node;
+ struct fs_fte *fte;
+ struct mlx5_fpga_ipsec_sa_ctx *ctx;
};
-struct mlx5_ipsec_sadb_resp {
- __be32 syndrome;
- __be32 sw_sa_handle;
- u8 reserved[24];
-} __packed;
+static const struct rhashtable_params rhash_sa = {
+ .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
+ .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
+ .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
struct mlx5_fpga_ipsec {
+ struct mlx5_fpga_device *fdev;
struct list_head pending_cmds;
spinlock_t pending_cmds_lock; /* Protects pending_cmds */
u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
struct mlx5_fpga_conn *conn;
+
+ struct notifier_block fs_notifier_ingress_bypass;
+ struct notifier_block fs_notifier_egress;
+
+ /* Map hardware SA --> SA context
+ * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx)
+ * We use this hash to avoid duplicating SAs in the FPGA,
+ * which is not allowed.
+ */
+ struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
+ struct mutex sa_hash_lock;
+
+ /* Tree holding all rules for this fpga device
+ * Key for searching a rule (mlx5_fpga_ipsec_rule) is its fte pointer
+ */
+ struct rb_root rules_rb;
+ struct mutex rules_rb_lock; /* rules lock */
};
static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
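Because rhash_sa keys on the raw hw_sa bytes, two rules that would program the same (IPs, SPI, keymat) tuple resolve to one SA context. A sketch of the lookup side, assuming only the structures defined above (the helper name is illustrative):

static struct mlx5_fpga_ipsec_sa_ctx *
sketch_find_sa_ctx(struct mlx5_fpga_ipsec *fipsec,
		   struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	/* rhash_sa's key_offset points at hw_sa, so hw_sa is the key */
	return rhashtable_lookup_fast(&fipsec->sa_hash, hw_sa, rhash_sa);
}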
@@ -97,28 +139,29 @@ static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_dma_buf *buf,
u8 status)
{
- struct mlx5_ipsec_command_context *context;
+ struct mlx5_fpga_ipsec_cmd_context *context;
if (status) {
- context = container_of(buf, struct mlx5_ipsec_command_context,
+ context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
buf);
mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
status);
- context->status = MLX5_FPGA_IPSEC_SACMD_SEND_FAIL;
+ context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
complete(&context->complete);
}
}
-static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome)
+static inline
+int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
{
switch (syndrome) {
- case MLX5_IPSEC_RESPONSE_SUCCESS:
+ case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
return 0;
- case MLX5_IPSEC_RESPONSE_SADB_ISSUE:
+ case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
return -EEXIST;
- case MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST:
+ case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
return -EINVAL;
- case MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
+ case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
return -EIO;
}
return -EIO;
@@ -126,9 +169,9 @@ static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome)
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
- struct mlx5_ipsec_sadb_resp *resp = buf->sg[0].data;
- struct mlx5_ipsec_command_context *context;
- enum mlx5_ipsec_response_syndrome syndrome;
+ struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
+ struct mlx5_fpga_ipsec_cmd_context *context;
+ enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
struct mlx5_fpga_device *fdev = cb_arg;
unsigned long flags;
@@ -138,12 +181,12 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
return;
}
- mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x sa_id %x\n",
- ntohl(resp->syndrome), ntohl(resp->sw_sa_handle));
+ mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
+ ntohl(resp->syndrome));
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
- struct mlx5_ipsec_command_context,
+ struct mlx5_fpga_ipsec_cmd_context,
list);
if (context)
list_del(&context->list);
@@ -155,51 +198,48 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
}
mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
- if (context->sa.sw_sa_handle != resp->sw_sa_handle) {
- mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
- ntohl(context->sa.sw_sa_handle),
- ntohl(resp->sw_sa_handle));
- return;
- }
-
syndrome = ntohl(resp->syndrome);
context->status_code = syndrome_to_errno(syndrome);
- context->status = MLX5_FPGA_IPSEC_SACMD_COMPLETE;
+ context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
+ memcpy(&context->resp, resp, sizeof(*resp));
if (context->status_code)
- mlx5_fpga_warn(fdev, "IPSec SADB command failed with syndrome %08x\n",
+ mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
syndrome);
+
complete(&context->complete);
}
-void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
- struct mlx5_accel_ipsec_sa *cmd)
+static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
+ const void *cmd, int cmd_size)
{
- struct mlx5_ipsec_command_context *context;
+ struct mlx5_fpga_ipsec_cmd_context *context;
struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned long flags;
- int res = 0;
+ int res;
- BUILD_BUG_ON((sizeof(struct mlx5_accel_ipsec_sa) & 3) != 0);
if (!fdev || !fdev->ipsec)
return ERR_PTR(-EOPNOTSUPP);
- context = kzalloc(sizeof(*context), GFP_ATOMIC);
+ if (cmd_size & 3)
+ return ERR_PTR(-EINVAL);
+
+ context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
if (!context)
return ERR_PTR(-ENOMEM);
- memcpy(&context->sa, cmd, sizeof(*cmd));
+ context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
+ context->dev = fdev;
context->buf.complete = mlx5_fpga_ipsec_send_complete;
- context->buf.sg[0].size = sizeof(context->sa);
- context->buf.sg[0].data = &context->sa;
init_completion(&context->complete);
- context->dev = fdev;
+ memcpy(&context->command, cmd, cmd_size);
+ context->buf.sg[0].size = cmd_size;
+ context->buf.sg[0].data = &context->command;
+
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
- context->status = MLX5_FPGA_IPSEC_SACMD_PENDING;
-
res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
if (res) {
mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
@@ -214,47 +254,103 @@ void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
return context;
}
-int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
+static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
- struct mlx5_ipsec_command_context *context = ctx;
+ struct mlx5_fpga_ipsec_cmd_context *context = ctx;
+ unsigned long timeout =
+ msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC);
int res;
- res = wait_for_completion_killable(&context->complete);
- if (res) {
+ res = wait_for_completion_timeout(&context->complete, timeout);
+ if (!res) {
mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
- return -EINTR;
+ return -ETIMEDOUT;
}
- if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE)
+ if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
res = context->status_code;
else
res = -EIO;
- kfree(context);
return res;
}
+static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
+{
+ if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
+ return true;
+ return false;
+}
+
+static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
+ struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
+ int opcode)
+{
+ struct mlx5_core_dev *dev = fdev->mdev;
+ struct mlx5_ifc_fpga_ipsec_sa *sa;
+ struct mlx5_fpga_ipsec_cmd_context *cmd_context;
+ size_t sa_cmd_size;
+ int err;
+
+ hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
+ if (is_v2_sadb_supported(fdev->ipsec))
+ sa_cmd_size = sizeof(*hw_sa);
+ else
+ sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);
+
+ cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
+ mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
+ if (IS_ERR(cmd_context))
+ return PTR_ERR(cmd_context);
+
+ err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
+ if (err)
+ goto out;
+
+ sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
+ if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
+ mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
+ ntohl(sa->ipsec_sa_v1.sw_sa_handle),
+ ntohl(cmd_context->resp.sw_sa_handle));
+ err = -EIO;
+ }
+
+out:
+ kfree(cmd_context);
+ return err;
+}
+
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
u32 ret = 0;
- if (mlx5_fpga_is_ipsec_device(mdev))
- ret |= MLX5_ACCEL_IPSEC_DEVICE;
- else
+ if (mlx5_fpga_is_ipsec_device(mdev)) {
+ ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
+ ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
+ } else {
return ret;
+ }
if (!fdev->ipsec)
return ret;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
- ret |= MLX5_ACCEL_IPSEC_ESP;
+ ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
- ret |= MLX5_ACCEL_IPSEC_IPV6;
+ ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
- ret |= MLX5_ACCEL_IPSEC_LSO;
+ ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
+
+ if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
+ ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
+
+ if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
+ ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
+ ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
+ }
return ret;
}
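Callers treat the return value as a capability bitmask. A sketch of gating a feature on it (the helper name is illustrative, the flag names are from this patch):

static bool sketch_can_offload_esn(struct mlx5_core_dev *mdev)
{
	u32 caps = mlx5_fpga_ipsec_device_caps(mdev);

	/* ESN offload only makes sense on an IPSec-capable device */
	return (caps & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
	       (caps & MLX5_ACCEL_IPSEC_CAP_ESN);
}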
@@ -318,6 +414,828 @@ out:
return ret;
}
+static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
+{
+ struct mlx5_fpga_ipsec_cmd_context *context;
+ struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
+ int err;
+
+ cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
+ cmd.flags = htonl(flags);
+ context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
+ if (IS_ERR(context)) {
+ err = PTR_ERR(context);
+ goto out;
+ }
+
+ err = mlx5_fpga_ipsec_cmd_wait(context);
+ if (err)
+ goto out;
+
+ if ((context->resp.flags & cmd.flags) != cmd.flags) {
+ mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
+ cmd.flags,
+ context->resp.flags);
+ err = -EIO;
+ }
+
+out:
+ return err;
+}
+
+static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
+{
+ u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
+ u32 flags = 0;
+
+ if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
+ flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
+
+ return mlx5_fpga_ipsec_set_caps(mdev, flags);
+}
+
+static void
+mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
+ struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
+{
+ const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
+
+ /* key */
+ memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
+ aes_gcm->key_len / 8);
+ /* Duplicate 128 bit key twice according to HW layout */
+ if (aes_gcm->key_len == 128)
+ memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
+ aes_gcm->aes_key, aes_gcm->key_len / 8);
+
+ /* salt and seq_iv */
+ memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
+ sizeof(aes_gcm->seq_iv));
+ memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
+ sizeof(aes_gcm->salt));
+
+ /* esn */
+ if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
+ hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
+ hw_sa->ipsec_sa_v1.flags |=
+ (xfrm_attrs->flags &
+ MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
+ MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
+ hw_sa->esn = htonl(xfrm_attrs->esn);
+ } else {
+ hw_sa->ipsec_sa_v1.flags &= ~(MLX5_FPGA_IPSEC_SA_ESN_EN |
+ MLX5_FPGA_IPSEC_SA_ESN_OVERLAP);
+ hw_sa->esn = 0;
+ }
+
+ /* rx handle */
+ hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
+
+ /* enc mode */
+ switch (aes_gcm->key_len) {
+ case 128:
+ hw_sa->ipsec_sa_v1.enc_mode =
+ MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
+ break;
+ case 256:
+ hw_sa->ipsec_sa_v1.enc_mode =
+ MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
+ break;
+ }
+
+ /* flags */
+ hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
+ MLX5_FPGA_IPSEC_SA_SPI_EN |
+ MLX5_FPGA_IPSEC_SA_IP_ESP;
+
+ if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
+ hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
+ else
+ hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
+}
+
+static void
+mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
+ const __be32 saddr[4],
+ const __be32 daddr[4],
+ const __be32 spi, bool is_ipv6,
+ struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
+{
+ mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);
+
+ /* IPs */
+ memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
+ memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));
+
+ /* SPI */
+ hw_sa->ipsec_sa_v1.spi = spi;
+
+ /* flags */
+ if (is_ipv6)
+ hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
+}
+
+static bool is_full_mask(const void *p, size_t len)
+{
+ WARN_ON(len % 4);
+
+ return !memchr_inv(p, 0xff, len);
+}
+
+static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
+ const u32 *match_c,
+ const u32 *match_v)
+{
+ const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
+ match_c,
+ misc_parameters);
+ const void *headers_c = MLX5_ADDR_OF(fte_match_param,
+ match_c,
+ outer_headers);
+ const void *headers_v = MLX5_ADDR_OF(fte_match_param,
+ match_v,
+ outer_headers);
+
+ if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
+ const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
+ ipv4)) ||
+ !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
+ ipv4)))
+ return false;
+ } else {
+ const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+
+ if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6)) ||
+ !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6)))
+ return false;
+ }
+
+ if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
+ outer_esp_spi),
+ MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
+ return false;
+
+ return true;
+}
+
+static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
+ u8 match_criteria_enable,
+ const u32 *match_c,
+ const u32 *match_v)
+{
+ u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
+ bool ipv6_flow;
+
+ ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
+
+ if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
+ mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
+ mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
+ mlx5_fs_is_vxlan_flow(match_c) ||
+ !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
+ ipv6_flow))
+ return false;
+
+ if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
+ return false;
+
+ if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
+ mlx5_fs_is_outer_ipsec_flow(match_c))
+ return false;
+
+ if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
+ ipv6_flow)
+ return false;
+
+ if (!validate_fpga_full_mask(dev, match_c, match_v))
+ return false;
+
+ return true;
+}
+
+static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
+ u8 match_criteria_enable,
+ const u32 *match_c,
+ const u32 *match_v,
+ struct mlx5_flow_act *flow_act)
+{
+ const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
+ MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
+ bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
+ MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
+ int ret;
+
+ ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
+ match_v);
+ if (!ret)
+ return ret;
+
+ if (is_dmac || is_smac ||
+ (match_criteria_enable &
+ ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
+ (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
+ flow_act->has_flow_tag)
+ return false;
+
+ return true;
+}
+
+void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *accel_xfrm,
+ const __be32 saddr[4],
+ const __be32 daddr[4],
+ const __be32 spi, bool is_ipv6)
+{
+ struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
+ struct mlx5_fpga_esp_xfrm *fpga_xfrm =
+ container_of(accel_xfrm, typeof(*fpga_xfrm),
+ accel_xfrm);
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
+ int opcode, err;
+ void *context;
+
+ /* alloc SA */
+ sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
+ if (!sa_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ sa_ctx->dev = mdev;
+
+ /* build candidate SA */
+ mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
+ saddr, daddr, spi, is_ipv6,
+ &sa_ctx->hw_sa);
+
+ mutex_lock(&fpga_xfrm->lock);
+
+ if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
+ /* all rules must be with same IPs and SPI */
+ if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
+ sizeof(sa_ctx->hw_sa))) {
+ context = ERR_PTR(-EINVAL);
+ goto exists;
+ }
+
+ ++fpga_xfrm->num_rules;
+ context = fpga_xfrm->sa_ctx;
+ goto exists;
+ }
+
+ /* This is an unbound fpga_xfrm; try to add it to the hash */
+ mutex_lock(&fipsec->sa_hash_lock);
+
+ err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
+ rhash_sa);
+ if (err) {
+ /* Can't bind a different accel_xfrm to an already existing
+ * sa_ctx. This is because we can't support multiple keymats
+ * for the same IPs and SPI.
+ */
+ context = ERR_PTR(-EEXIST);
+ goto unlock_hash;
+ }
+
+ /* Bind accel_xfrm to sa_ctx */
+ opcode = is_v2_sadb_supported(fdev->ipsec) ?
+ MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
+ MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
+ err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
+ sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
+ if (err) {
+ context = ERR_PTR(err);
+ goto delete_hash;
+ }
+
+ mutex_unlock(&fipsec->sa_hash_lock);
+
+ ++fpga_xfrm->num_rules;
+ fpga_xfrm->sa_ctx = sa_ctx;
+ sa_ctx->fpga_xfrm = fpga_xfrm;
+
+ mutex_unlock(&fpga_xfrm->lock);
+
+ return sa_ctx;
+
+delete_hash:
+ WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
+ rhash_sa));
+unlock_hash:
+ mutex_unlock(&fipsec->sa_hash_lock);
+
+exists:
+ mutex_unlock(&fpga_xfrm->lock);
+ kfree(sa_ctx);
+ return context;
+}
+
+static void *
+mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
+ struct fs_fte *fte,
+ bool is_egress)
+{
+ struct mlx5_accel_esp_xfrm *accel_xfrm;
+ __be32 saddr[4], daddr[4], spi;
+ struct mlx5_flow_group *fg;
+ bool is_ipv6 = false;
+
+ fs_get_obj(fg, fte->node.parent);
+ /* validate */
+ if (is_egress &&
+ !mlx5_is_fpga_egress_ipsec_rule(mdev,
+ fg->mask.match_criteria_enable,
+ fg->mask.match_criteria,
+ fte->val,
+ &fte->action))
+ return ERR_PTR(-EINVAL);
+ else if (!mlx5_is_fpga_ipsec_rule(mdev,
+ fg->mask.match_criteria_enable,
+ fg->mask.match_criteria,
+ fte->val))
+ return ERR_PTR(-EINVAL);
+
+ /* get xfrm context */
+ accel_xfrm =
+ (struct mlx5_accel_esp_xfrm *)fte->action.esp_id;
+
+ /* IPs */
+ if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
+ fte->val)) {
+ memcpy(&saddr[3],
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ fte->val,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ sizeof(saddr[3]));
+ memcpy(&daddr[3],
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ fte->val,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ sizeof(daddr[3]));
+ } else {
+ memcpy(saddr,
+ MLX5_ADDR_OF(fte_match_param,
+ fte->val,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ sizeof(saddr));
+ memcpy(daddr,
+ MLX5_ADDR_OF(fte_match_param,
+ fte->val,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ sizeof(daddr));
+ is_ipv6 = true;
+ }
+
+ /* SPI */
+ spi = MLX5_GET_BE(typeof(spi),
+ fte_match_param, fte->val,
+ misc_parameters.outer_esp_spi);
+
+ /* create */
+ return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
+ saddr, daddr,
+ spi, is_ipv6);
+}
+
+static void
+mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
+{
+ struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
+ struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
+ int opcode = is_v2_sadb_supported(fdev->ipsec) ?
+ MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
+ MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
+ int err;
+
+ err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
+ sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
+ if (err) {
+ WARN_ON(err);
+ return;
+ }
+
+ mutex_lock(&fipsec->sa_hash_lock);
+ WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
+ rhash_sa));
+ mutex_unlock(&fipsec->sa_hash_lock);
+}
+
+void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
+{
+ struct mlx5_fpga_esp_xfrm *fpga_xfrm =
+ ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
+
+ mutex_lock(&fpga_xfrm->lock);
+ if (!--fpga_xfrm->num_rules) {
+ mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
+ fpga_xfrm->sa_ctx = NULL;
+ }
+ mutex_unlock(&fpga_xfrm->lock);
+}
+
+static inline struct mlx5_fpga_ipsec_rule *
+_rule_search(struct rb_root *root, struct fs_fte *fte)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct mlx5_fpga_ipsec_rule *rule =
+ container_of(node, struct mlx5_fpga_ipsec_rule,
+ node);
+
+ /* left subtree holds smaller fte pointers, matching _rule_insert() */
+ if (fte < rule->fte)
+ node = node->rb_left;
+ else if (fte > rule->fte)
+ node = node->rb_right;
+ else
+ return rule;
+ }
+ return NULL;
+}
+
+static struct mlx5_fpga_ipsec_rule *
+rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
+{
+ struct mlx5_fpga_ipsec_rule *rule;
+
+ mutex_lock(&ipsec_dev->rules_rb_lock);
+ rule = _rule_search(&ipsec_dev->rules_rb, fte);
+ mutex_unlock(&ipsec_dev->rules_rb_lock);
+
+ return rule;
+}
+
+static inline int _rule_insert(struct rb_root *root,
+ struct mlx5_fpga_ipsec_rule *rule)
+{
+ struct rb_node **new = &root->rb_node, *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct mlx5_fpga_ipsec_rule *this =
+ container_of(*new, struct mlx5_fpga_ipsec_rule,
+ node);
+
+ parent = *new;
+ if (rule->fte < this->fte)
+ new = &((*new)->rb_left);
+ else if (rule->fte > this->fte)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&rule->node, parent, new);
+ rb_insert_color(&rule->node, root);
+
+ return 0;
+}
+
+static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
+ struct mlx5_fpga_ipsec_rule *rule)
+{
+ int ret;
+
+ mutex_lock(&ipsec_dev->rules_rb_lock);
+ ret = _rule_insert(&ipsec_dev->rules_rb, rule);
+ mutex_unlock(&ipsec_dev->rules_rb_lock);
+
+ return ret;
+}
+
+static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
+ struct mlx5_fpga_ipsec_rule *rule)
+{
+ struct rb_root *root = &ipsec_dev->rules_rb;
+
+ mutex_lock(&ipsec_dev->rules_rb_lock);
+ rb_erase(&rule->node, root);
+ mutex_unlock(&ipsec_dev->rules_rb_lock);
+}
+
+static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
+ struct mlx5_fpga_ipsec_rule *rule)
+{
+ _rule_delete(ipsec_dev, rule);
+ kfree(rule);
+}
+
+struct mailbox_mod {
+ uintptr_t saved_esp_id;
+ u32 saved_action;
+ u32 saved_outer_esp_spi_value;
+};
+
+static void restore_spec_mailbox(struct fs_fte *fte,
+ struct mailbox_mod *mbox_mod)
+{
+ char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
+ fte->val,
+ misc_parameters);
+
+ MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
+ mbox_mod->saved_outer_esp_spi_value);
+ fte->action.action |= mbox_mod->saved_action;
+ fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
+}
+
+static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
+ struct fs_fte *fte,
+ struct mailbox_mod *mbox_mod)
+{
+ char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
+ fte->val,
+ misc_parameters);
+
+ mbox_mod->saved_esp_id = fte->action.esp_id;
+ mbox_mod->saved_action = fte->action.action &
+ (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
+ mbox_mod->saved_outer_esp_spi_value =
+ MLX5_GET(fte_match_set_misc, misc_params_v,
+ outer_esp_spi);
+
+ fte->action.esp_id = 0;
+ fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
+ if (!MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
+ MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
+}
+
+static enum fs_flow_table_type egress_to_fs_ft(bool egress)
+{
+ return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
+}
+
+static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ unsigned int *group_id,
+ bool is_egress)
+{
+ int (*create_flow_group)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft, u32 *in,
+ unsigned int *group_id) =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
+ char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
+ match_criteria.misc_parameters);
+ u32 saved_outer_esp_spi_mask;
+ u8 match_criteria_enable;
+ int ret;
+
+ if (MLX5_CAP_FLOWTABLE(dev,
+ flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
+ return create_flow_group(dev, ft, in, group_id);
+
+ match_criteria_enable =
+ MLX5_GET(create_flow_group_in, in, match_criteria_enable);
+ saved_outer_esp_spi_mask =
+ MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
+ if (!match_criteria_enable || !saved_outer_esp_spi_mask)
+ return create_flow_group(dev, ft, in, group_id);
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
+
+ if (!(*misc_params_c) &&
+ !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+ match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
+
+ ret = create_flow_group(dev, ft, in, group_id);
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
+
+ return ret;
+}
+
+static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg,
+ struct fs_fte *fte,
+ bool is_egress)
+{
+ int (*create_fte)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg,
+ struct fs_fte *fte) =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
+ struct mlx5_fpga_device *fdev = dev->fpga;
+ struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
+ struct mlx5_fpga_ipsec_rule *rule;
+ bool is_esp = fte->action.esp_id;
+ struct mailbox_mod mbox_mod;
+ int ret;
+
+ if (!is_esp ||
+ !(fte->action.action &
+ (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
+ return create_fte(dev, ft, fg, fte);
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
+ if (IS_ERR(rule->ctx)) {
+ kfree(rule);
+ return PTR_ERR(rule->ctx);
+ }
+
+ rule->fte = fte;
+ WARN_ON(rule_insert(fipsec, rule));
+
+ modify_spec_mailbox(dev, fte, &mbox_mod);
+ ret = create_fte(dev, ft, fg, fte);
+ restore_spec_mailbox(fte, &mbox_mod);
+ if (ret) {
+ _rule_delete(fipsec, rule);
+ mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
+ kfree(rule);
+ }
+
+ return ret;
+}
+
+static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id,
+ int modify_mask,
+ struct fs_fte *fte,
+ bool is_egress)
+{
+ int (*update_fte)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id,
+ int modify_mask,
+ struct fs_fte *fte) =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
+ bool is_esp = fte->action.esp_id;
+ struct mailbox_mod mbox_mod;
+ int ret;
+
+ if (!is_esp ||
+ !(fte->action.action &
+ (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
+ return update_fte(dev, ft, group_id, modify_mask, fte);
+
+ modify_spec_mailbox(dev, fte, &mbox_mod);
+ ret = update_fte(dev, ft, group_id, modify_mask, fte);
+ restore_spec_mailbox(fte, &mbox_mod);
+
+ return ret;
+}
+
+static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte,
+ bool is_egress)
+{
+ int (*delete_fte)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte) =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
+ struct mlx5_fpga_device *fdev = dev->fpga;
+ struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
+ struct mlx5_fpga_ipsec_rule *rule;
+ bool is_esp = fte->action.esp_id;
+ struct mailbox_mod mbox_mod;
+ int ret;
+
+ if (!is_esp ||
+ !(fte->action.action &
+ (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
+ return delete_fte(dev, ft, fte);
+
+ rule = rule_search(fipsec, fte);
+ if (!rule)
+ return -ENOENT;
+
+ mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
+ rule_delete(fipsec, rule);
+
+ modify_spec_mailbox(dev, fte, &mbox_mod);
+ ret = delete_fte(dev, ft, fte);
+ restore_spec_mailbox(fte, &mbox_mod);
+
+ return ret;
+}
+
+static int
+mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ unsigned int *group_id)
+{
+ return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
+}
+
+static int
+mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg,
+ struct fs_fte *fte)
+{
+ return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
+}
+
+static int
+mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
+ true);
+}
+
+static int
+mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
+}
+
+static int
+mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ unsigned int *group_id)
+{
+ return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
+}
+
+static int
+mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg,
+ struct fs_fte *fte)
+{
+ return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
+}
+
+static int
+mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
+ false);
+}
+
+static int
+mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
+}
+
+static struct mlx5_flow_cmds fpga_ipsec_ingress;
+static struct mlx5_flow_cmds fpga_ipsec_egress;
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
+{
+ switch (type) {
+ case FS_FT_NIC_RX:
+ return &fpga_ipsec_ingress;
+ case FS_FT_NIC_TX:
+ return &fpga_ipsec_egress;
+ default:
+ WARN_ON(true);
+ return NULL;
+ }
+}
+
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_conn_attr init_attr = {0};
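The dispatch above is what lets fs_core swap steering backends per flow-table type. A sketch of the selection a caller might perform; MLX5_FLOW_TABLE_ACCEL_IPSEC is a hypothetical flag used only for illustration:

static const struct mlx5_flow_cmds *
sketch_get_flow_cmds(enum fs_flow_table_type type, u32 ft_flags)
{
	if (ft_flags & MLX5_FLOW_TABLE_ACCEL_IPSEC)	/* hypothetical */
		return mlx5_fs_cmd_get_default_ipsec_fpga_cmds(type);

	return mlx5_fs_cmd_get_default(type);
}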
@@ -332,6 +1250,8 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
if (!fdev->ipsec)
return -ENOMEM;
+ fdev->ipsec->fdev = fdev;
+
err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
fdev->ipsec->caps);
if (err) {
@@ -355,14 +1275,47 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
goto error;
}
fdev->ipsec->conn = conn;
+
+ err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
+ if (err)
+ goto err_destroy_conn;
+ mutex_init(&fdev->ipsec->sa_hash_lock);
+
+ fdev->ipsec->rules_rb = RB_ROOT;
+ mutex_init(&fdev->ipsec->rules_rb_lock);
+
+ err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
+ err);
+ goto err_destroy_hash;
+ }
+
return 0;
+err_destroy_hash:
+ rhashtable_destroy(&fdev->ipsec->sa_hash);
+
+err_destroy_conn:
+ mlx5_fpga_sbu_conn_destroy(conn);
+
error:
kfree(fdev->ipsec);
fdev->ipsec = NULL;
return err;
}
+static void destroy_rules_rb(struct rb_root *root)
+{
+ struct mlx5_fpga_ipsec_rule *r, *tmp;
+
+ rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
+ rb_erase(&r->node, root);
+ mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
+ kfree(r);
+ }
+}
+
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
@@ -370,7 +1323,209 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
if (!mlx5_fpga_is_ipsec_device(mdev))
return;
+ destroy_rules_rb(&fdev->ipsec->rules_rb);
+ rhashtable_destroy(&fdev->ipsec->sa_hash);
+
mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
kfree(fdev->ipsec);
fdev->ipsec = NULL;
}
+
+void mlx5_fpga_ipsec_build_fs_cmds(void)
+{
+ /* ingress */
+ fpga_ipsec_ingress.create_flow_table =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
+ fpga_ipsec_ingress.destroy_flow_table =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
+ fpga_ipsec_ingress.modify_flow_table =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
+ fpga_ipsec_ingress.create_flow_group =
+ mlx5_fpga_ipsec_fs_create_flow_group_ingress;
+ fpga_ipsec_ingress.destroy_flow_group =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
+ fpga_ipsec_ingress.create_fte =
+ mlx5_fpga_ipsec_fs_create_fte_ingress;
+ fpga_ipsec_ingress.update_fte =
+ mlx5_fpga_ipsec_fs_update_fte_ingress;
+ fpga_ipsec_ingress.delete_fte =
+ mlx5_fpga_ipsec_fs_delete_fte_ingress;
+ fpga_ipsec_ingress.update_root_ft =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;
+
+ /* egress */
+ fpga_ipsec_egress.create_flow_table =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
+ fpga_ipsec_egress.destroy_flow_table =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
+ fpga_ipsec_egress.modify_flow_table =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
+ fpga_ipsec_egress.create_flow_group =
+ mlx5_fpga_ipsec_fs_create_flow_group_egress;
+ fpga_ipsec_egress.destroy_flow_group =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
+ fpga_ipsec_egress.create_fte =
+ mlx5_fpga_ipsec_fs_create_fte_egress;
+ fpga_ipsec_egress.update_fte =
+ mlx5_fpga_ipsec_fs_update_fte_egress;
+ fpga_ipsec_egress.delete_fte =
+ mlx5_fpga_ipsec_fs_delete_fte_egress;
+ fpga_ipsec_egress.update_root_ft =
+ mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
+}
+
+static int
+mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ if (attrs->tfc_pad) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
+ mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->keymat.aes_gcm.iv_algo !=
+ MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
+ mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->keymat.aes_gcm.icv_len != 128) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attrs->keymat.aes_gcm.key_len != 128 &&
+ attrs->keymat.aes_gcm.key_len != 256) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
+ (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
+ v2_command))) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+struct mlx5_accel_esp_xfrm *
+mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 flags)
+{
+ struct mlx5_fpga_esp_xfrm *fpga_xfrm;
+
+ if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
+ mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+ mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
+ if (!fpga_xfrm)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&fpga_xfrm->lock);
+ memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
+ sizeof(fpga_xfrm->accel_xfrm.attrs));
+
+ return &fpga_xfrm->accel_xfrm;
+}
+
+void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
+{
+ struct mlx5_fpga_esp_xfrm *fpga_xfrm =
+ container_of(xfrm, struct mlx5_fpga_esp_xfrm,
+ accel_xfrm);
+ /* assuming no sa_ctx is connected to this xfrm_ctx */
+ kfree(fpga_xfrm);
+}
+
+int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ struct mlx5_core_dev *mdev = xfrm->mdev;
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
+ struct mlx5_fpga_esp_xfrm *fpga_xfrm;
+ struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
+
+ int err = 0;
+
+ if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
+ return 0;
+
+ if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+ mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_v2_sadb_supported(fipsec)) {
+ mlx5_core_warn(mdev, "Modify esp is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
+
+ mutex_lock(&fpga_xfrm->lock);
+
+ if (!fpga_xfrm->sa_ctx)
+ /* Unbound xfrm, change only sw attrs */
+ goto change_sw_xfrm_attrs;
+
+ /* copy original hw sa */
+ memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
+ mutex_lock(&fipsec->sa_hash_lock);
+ /* remove original hw sa from hash */
+ WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
+ &fpga_xfrm->sa_ctx->hash, rhash_sa));
+ /* update hw_sa with new xfrm attrs */
+ mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
+ &fpga_xfrm->sa_ctx->hw_sa);
+ /* try to insert new hw_sa to hash */
+ err = rhashtable_insert_fast(&fipsec->sa_hash,
+ &fpga_xfrm->sa_ctx->hash, rhash_sa);
+ if (err)
+ goto rollback_sa;
+
+ /* modify device with new hw_sa */
+ err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
+ MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
+ fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
+ if (err)
+ WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
+ &fpga_xfrm->sa_ctx->hash,
+ rhash_sa));
+rollback_sa:
+ if (err) {
+ /* return original hw_sa to hash */
+ memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
+ sizeof(org_hw_sa));
+ WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
+ &fpga_xfrm->sa_ctx->hash,
+ rhash_sa));
+ }
+ mutex_unlock(&fipsec->sa_hash_lock);
+
+change_sw_xfrm_attrs:
+ if (!err)
+ memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
+ mutex_unlock(&fpga_xfrm->lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
index 26a3e4b56972..2b5e63b0d4d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -35,33 +35,38 @@
#define __MLX5_FPGA_IPSEC_H__
#include "accel/ipsec.h"
+#include "fs_cmd.h"
#ifdef CONFIG_MLX5_FPGA
-void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
- struct mlx5_accel_ipsec_sa *cmd);
-int mlx5_fpga_ipsec_sa_cmd_wait(void *context);
-
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int counters_count);
+void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *accel_xfrm,
+ const __be32 saddr[4],
+ const __be32 daddr[4],
+ const __be32 spi, bool is_ipv6);
+void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
+
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
+void mlx5_fpga_ipsec_build_fs_cmds(void);
-#else
+struct mlx5_accel_esp_xfrm *
+mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 flags);
+void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
+int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs);
-static inline void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
- struct mlx5_accel_ipsec_sa *cmd)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
+const struct mlx5_flow_cmds *
+mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
-static inline int mlx5_fpga_ipsec_sa_cmd_wait(void *context)
-{
- return -EOPNOTSUPP;
-}
+#else
static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
@@ -80,6 +85,20 @@ static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev,
return 0;
}
+static inline void *
+mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *accel_xfrm,
+ const __be32 saddr[4],
+ const __be32 daddr[4],
+ const __be32 spi, bool is_ipv6)
+{
+ return NULL;
+}
+
+static inline void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
+{
+}
+
static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
return 0;
@@ -89,6 +108,35 @@ static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
}
+static inline void mlx5_fpga_ipsec_build_fs_cmds(void)
+{
+}
+
+static inline struct mlx5_accel_esp_xfrm *
+mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
+{
+}
+
+static inline int
+mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline const struct mlx5_flow_cmds *
+mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
+{
+ return mlx5_fs_cmd_get_default(type);
+}
+
#endif /* CONFIG_MLX5_FPGA */
#endif /* __MLX5_FPGA_IPSEC_H__ */
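The header keeps real prototypes under CONFIG_MLX5_FPGA and no-op inline stubs otherwise, so call sites stay free of #ifdefs. A sketch of that pattern from the caller's side; the accel-layer wrapper name is hypothetical:

static int sketch_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
	int err;

	err = mlx5_fpga_ipsec_init(mdev);	/* stub returns 0 without FPGA */
	if (err)
		return err;

	mlx5_fpga_ipsec_build_fs_cmds();	/* empty stub without FPGA */
	return 0;
}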
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 881e2e55840c..645f83cac34d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -39,9 +39,81 @@
#include "mlx5_core.h"
#include "eswitch.h"
-int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft, u32 underlay_qpn,
- bool disconnect)
+static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
+ enum fs_flow_table_op_mod op_mod,
+ enum fs_flow_table_type type,
+ unsigned int level,
+ unsigned int log_size,
+ struct mlx5_flow_table *next_ft,
+ unsigned int *table_id, u32 flags)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ unsigned int *group_id)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ return 0;
+}
+
+static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft, u32 underlay_qpn,
+ bool disconnect)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
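The stub commands above give software-only flow namespaces a backend where every firmware call is a successful no-op (update_fte excepted, since there is nothing to update). A sketch of collecting them into a command set, mirroring how fs_cmd.c tables its defaults; the struct literal here is illustrative:

static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table  = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table  = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group  = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte         = mlx5_cmd_stub_create_fte,
	.update_fte         = mlx5_cmd_stub_update_fte,
	.delete_fte         = mlx5_cmd_stub_delete_fte,
	.update_root_ft     = mlx5_cmd_stub_update_root_ft,
};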
@@ -71,12 +143,14 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_flow_table_op_mod op_mod,
- enum fs_flow_table_type type, unsigned int level,
- unsigned int log_size, struct mlx5_flow_table
- *next_ft, unsigned int *table_id, u32 flags)
+static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
+ enum fs_flow_table_op_mod op_mod,
+ enum fs_flow_table_type type,
+ unsigned int level,
+ unsigned int log_size,
+ struct mlx5_flow_table *next_ft,
+ unsigned int *table_id, u32 flags)
{
int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
@@ -125,8 +199,8 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
return err;
}
-int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft)
+static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
@@ -143,9 +217,9 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_table *next_ft)
+static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
{
u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
@@ -188,10 +262,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- u32 *in,
- unsigned int *group_id)
+static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ unsigned int *group_id)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -213,9 +287,9 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
return err;
}
-int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- unsigned int group_id)
+static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id)
{
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
@@ -266,16 +340,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
- MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
- MLX5_SET(flow_context, in_flow_context, action, fte->action);
- MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
- MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
+ MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
+ MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
+ MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
+ MLX5_SET(flow_context, in_flow_context, modify_header_id,
+ fte->action.modify_id);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -301,7 +376,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
log_max_flow_counter,
ft->type));
@@ -332,19 +407,21 @@ err_out:
return err;
}
-int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- unsigned group_id,
- struct fs_fte *fte)
+static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
{
+ unsigned int group_id = group->id;
+
return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
-int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- unsigned group_id,
- int modify_mask,
- struct fs_fte *fte)
+static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id,
+ int modify_mask,
+ struct fs_fte *fte)
{
int opmod;
int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
@@ -357,9 +434,9 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}
-int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- unsigned int index)
+static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
{
u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
@@ -367,7 +444,7 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, ft->type);
MLX5_SET(delete_fte_in, in, table_id, ft->id);
- MLX5_SET(delete_fte_in, in, flow_index, index);
+ MLX5_SET(delete_fte_in, in, flow_index, fte->index);
if (ft->vport) {
MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
MLX5_SET(delete_fte_in, in, other_vport, 1);
@@ -610,3 +687,53 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+
+static const struct mlx5_flow_cmds mlx5_flow_cmds = {
+ .create_flow_table = mlx5_cmd_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_modify_flow_table,
+ .create_flow_group = mlx5_cmd_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_destroy_flow_group,
+ .create_fte = mlx5_cmd_create_fte,
+ .update_fte = mlx5_cmd_update_fte,
+ .delete_fte = mlx5_cmd_delete_fte,
+ .update_root_ft = mlx5_cmd_update_root_ft,
+};
+
+static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
+ .create_flow_table = mlx5_cmd_stub_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_stub_modify_flow_table,
+ .create_flow_group = mlx5_cmd_stub_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
+ .create_fte = mlx5_cmd_stub_create_fte,
+ .update_fte = mlx5_cmd_stub_update_fte,
+ .delete_fte = mlx5_cmd_stub_delete_fte,
+ .update_root_ft = mlx5_cmd_stub_update_root_ft,
+};
+
+static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
+{
+ return &mlx5_flow_cmds;
+}
+
+static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
+{
+ return &mlx5_flow_cmd_stubs;
+}
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
+{
+ switch (type) {
+ case FS_FT_NIC_RX:
+ case FS_FT_ESW_EGRESS_ACL:
+ case FS_FT_ESW_INGRESS_ACL:
+ case FS_FT_FDB:
+ case FS_FT_SNIFFER_RX:
+ case FS_FT_SNIFFER_TX:
+ return mlx5_fs_cmd_get_fw_cmds();
+ case FS_FT_NIC_TX:
+ default:
+ return mlx5_fs_cmd_get_stub_cmds();
+ }
+}
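
Taken together, the hunks above turn the flow-steering command layer into a per-table-type vtable: firmware-backed types dispatch to the mlx5_cmd_*() implementations, while types with no firmware tables yet (NIC TX) get no-op stubs that keep the software tree consistent without touching the device. A minimal standalone sketch of the pattern follows; every *_example name is hypothetical, not driver code:

    struct dev_example;

    struct flow_cmds_example {
            int (*create_table)(struct dev_example *dev, unsigned int *id);
            int (*destroy_table)(struct dev_example *dev, unsigned int id);
    };

    /* firmware-backed implementation: would issue device commands */
    static int fw_create_table(struct dev_example *dev, unsigned int *id)
    {
            *id = 1;
            return 0;
    }

    static int fw_destroy_table(struct dev_example *dev, unsigned int id)
    {
            return 0;
    }

    /* stubs: succeed without touching hardware, like mlx5_cmd_stub_*() */
    static int stub_create_table(struct dev_example *dev, unsigned int *id)
    {
            return 0;
    }

    static int stub_destroy_table(struct dev_example *dev, unsigned int id)
    {
            return 0;
    }

    static const struct flow_cmds_example fw_cmds_example = {
            .create_table  = fw_create_table,
            .destroy_table = fw_destroy_table,
    };

    static const struct flow_cmds_example stub_cmds_example = {
            .create_table  = stub_create_table,
            .destroy_table = stub_destroy_table,
    };

    enum table_type_example { TT_RX_EXAMPLE, TT_TX_EXAMPLE };

    static const struct flow_cmds_example *
    get_cmds_example(enum table_type_example type)
    {
            /* NIC TX has no firmware tables, so it gets the stubs */
            return type == TT_TX_EXAMPLE ? &stub_cmds_example
                                         : &fw_cmds_example;
    }

Returning a pointer to a static const ops table keeps selection trivial and makes the chosen backend immutable for the lifetime of the namespace that captured it.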
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 71e2d0f37ad9..6228ba7bfa1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -33,46 +33,52 @@
#ifndef _MLX5_FS_CMD_
#define _MLX5_FS_CMD_
-int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_flow_table_op_mod op_mod,
- enum fs_flow_table_type type, unsigned int level,
- unsigned int log_size, struct mlx5_flow_table
- *next_ft, unsigned int *table_id, u32 flags);
+#include "fs_core.h"
-int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft);
+struct mlx5_flow_cmds {
+ int (*create_flow_table)(struct mlx5_core_dev *dev,
+ u16 vport,
+ enum fs_flow_table_op_mod op_mod,
+ enum fs_flow_table_type type,
+ unsigned int level, unsigned int log_size,
+ struct mlx5_flow_table *next_ft,
+ unsigned int *table_id, u32 flags);
+ int (*destroy_flow_table)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft);
-int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_table *next_ft);
+ int (*modify_flow_table)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft);
-int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- u32 *in, unsigned int *group_id);
+ int (*create_flow_group)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ unsigned int *group_id);
-int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- unsigned int group_id);
+ int (*destroy_flow_group)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id);
-int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- unsigned group_id,
- struct fs_fte *fte);
+ int (*create_fte)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg,
+ struct fs_fte *fte);
-int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- unsigned group_id,
- int modify_mask,
- struct fs_fte *fte);
+ int (*update_fte)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id,
+ int modify_mask,
+ struct fs_fte *fte);
-int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- unsigned int index);
+ int (*delete_fte)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte);
-int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft, u32 underlay_qpn,
- bool disconnect);
+ int (*update_root_ft)(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect);
+};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
@@ -90,4 +96,6 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
struct mlx5_cmd_fc_bulk *b, u32 id,
u64 *packets, u64 *bytes);
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
+
#endif
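
Exposing struct mlx5_flow_cmds from this header is what allows a third backend next to the firmware and stub tables, such as the FPGA IPSec command set that fs_core.c selects in create_root_ns(). A hedged schematic of an interposing backend; rule_is_ipsec_crypto() and fpga_sadb_add() are invented stand-ins, not the actual fpga/ipsec.c logic:

    static int ipsec_fpga_create_fte(struct mlx5_core_dev *dev,
                                     struct mlx5_flow_table *ft,
                                     struct mlx5_flow_group *fg,
                                     struct fs_fte *fte)
    {
            /* steer crypto rules into the FPGA's SA database */
            if (rule_is_ipsec_crypto(fte))
                    return fpga_sadb_add(dev, fte);

            /* everything else falls through to the firmware commands */
            return mlx5_fs_cmd_get_default(ft->type)->create_fte(dev, ft,
                                                                 fg, fte);
    }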
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 31fc2cfac3b3..3ba07c7096ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -37,6 +37,8 @@
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
+#include "accel/ipsec.h"
+#include "fpga/ipsec.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -425,15 +427,17 @@ static void del_sw_prio(struct fs_node *node)
static void del_hw_flow_table(struct fs_node *node)
{
+ struct mlx5_flow_root_namespace *root;
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
int err;
fs_get_obj(ft, node);
dev = get_dev(&ft->node);
+ root = find_root(&ft->node);
if (node->active) {
- err = mlx5_cmd_destroy_flow_table(dev, ft);
+ err = root->cmds->destroy_flow_table(dev, ft);
if (err)
mlx5_core_warn(dev, "flow steering can't destroy ft\n");
}
@@ -454,6 +458,7 @@ static void del_sw_flow_table(struct fs_node *node)
static void del_sw_hw_rule(struct fs_node *node)
{
+ struct mlx5_flow_root_namespace *root;
struct mlx5_flow_rule *rule;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
@@ -477,19 +482,20 @@ static void del_sw_hw_rule(struct fs_node *node)
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
--fte->dests_size) {
modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
- fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
update_fte = true;
goto out;
}
- if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
+ if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
update_fte = true;
}
out:
+ root = find_root(&ft->node);
if (update_fte && fte->dests_size) {
- err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte);
+ err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
@@ -500,6 +506,7 @@ out:
static void del_hw_fte(struct fs_node *node)
{
+ struct mlx5_flow_root_namespace *root;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_core_dev *dev;
@@ -512,9 +519,9 @@ static void del_hw_fte(struct fs_node *node)
trace_mlx5_fs_del_fte(fte);
dev = get_dev(&ft->node);
+ root = find_root(&ft->node);
if (node->active) {
- err = mlx5_cmd_delete_fte(dev, ft,
- fte->index);
+ err = root->cmds->delete_fte(dev, ft, fte);
if (err)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
@@ -542,6 +549,7 @@ static void del_sw_fte(struct fs_node *node)
static void del_hw_flow_group(struct fs_node *node)
{
+ struct mlx5_flow_root_namespace *root;
struct mlx5_flow_group *fg;
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
@@ -551,7 +559,8 @@ static void del_hw_flow_group(struct fs_node *node)
dev = get_dev(&ft->node);
trace_mlx5_fs_del_fg(fg);
- if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
+ root = find_root(&ft->node);
+ if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
}
@@ -615,10 +624,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
memcpy(fte->val, match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
- fte->flow_tag = flow_act->flow_tag;
- fte->action = flow_act->action;
- fte->encap_id = flow_act->encap_id;
- fte->modify_id = flow_act->modify_id;
+ fte->action = *flow_act;
tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
@@ -797,15 +803,14 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
struct fs_prio *prio,
struct mlx5_flow_table *ft)
{
+ struct mlx5_flow_root_namespace *root = find_root(&prio->node);
struct mlx5_flow_table *iter;
int i = 0;
int err;
fs_for_each_ft(iter, prio) {
i++;
- err = mlx5_cmd_modify_flow_table(dev,
- iter,
- ft);
+ err = root->cmds->modify_flow_table(dev, iter, ft);
if (err) {
mlx5_core_warn(dev, "Failed to modify flow table %d\n",
iter->id);
@@ -853,12 +858,12 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0;
- err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, false);
+ err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
} else {
list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn;
- err = mlx5_cmd_update_root_ft(root->dev, ft, qpn,
- false);
+ err = root->cmds->update_root_ft(root->dev, ft,
+ qpn, false);
if (err)
break;
}
@@ -877,6 +882,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
struct mlx5_flow_destination *dest)
{
+ struct mlx5_flow_root_namespace *root;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct fs_fte *fte;
@@ -884,17 +890,16 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
int err = 0;
fs_get_obj(fte, rule->node.parent);
- if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
down_write_ref_node(&fte->node);
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
memcpy(&rule->dest_attr, dest, sizeof(*dest));
- err = mlx5_cmd_update_fte(get_dev(&ft->node),
- ft, fg->id,
- modify_mask,
- fte);
+ root = find_root(&ft->node);
+ err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
+ modify_mask, fte);
up_write_ref_node(&fte->node);
return err;
@@ -1035,9 +1040,9 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
- err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
- ft->level, log_table_sz, next_ft, &ft->id,
- ft->flags);
+ err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
+ ft->type, ft->level, log_table_sz,
+ next_ft, &ft->id, ft->flags);
if (err)
goto free_ft;
@@ -1053,7 +1058,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
mutex_unlock(&root->chain_lock);
return ft;
destroy_ft:
- mlx5_cmd_destroy_flow_table(root->dev, ft);
+ root->cmds->destroy_flow_table(root->dev, ft);
free_ft:
kfree(ft);
unlock_root:
@@ -1125,6 +1130,7 @@ EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
u32 *fg_in)
{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
fg_in, match_criteria);
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
@@ -1152,7 +1158,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (IS_ERR(fg))
return fg;
- err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
+ err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
if (err) {
tree_put_node(&fg->node);
return ERR_PTR(err);
@@ -1275,6 +1281,7 @@ add_rule_fte(struct fs_fte *fte,
int dest_num,
bool update_action)
{
+ struct mlx5_flow_root_namespace *root;
struct mlx5_flow_handle *handle;
struct mlx5_flow_table *ft;
int modify_mask = 0;
@@ -1290,12 +1297,13 @@ add_rule_fte(struct fs_fte *fte,
modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
fs_get_obj(ft, fg->node.parent);
+ root = find_root(&fg->node);
if (!(fte->status & FS_FTE_STATUS_EXISTING))
- err = mlx5_cmd_create_fte(get_dev(&ft->node),
- ft, fg->id, fte);
+ err = root->cmds->create_fte(get_dev(&ft->node),
+ ft, fg, fte);
else
- err = mlx5_cmd_update_fte(get_dev(&ft->node),
- ft, fg->id, modify_mask, fte);
+ err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
+ modify_mask, fte);
if (err)
goto free_handle;
@@ -1360,6 +1368,7 @@ out:
static int create_auto_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_core_dev *dev = get_dev(&ft->node);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *match_criteria_addr;
@@ -1380,7 +1389,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
memcpy(match_criteria_addr, fg->mask.match_criteria,
sizeof(fg->mask.match_criteria));
- err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id);
+ err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
if (!err) {
fg->node.active = true;
trace_mlx5_fs_add_fg(fg);
@@ -1438,16 +1447,17 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
{
- if (check_conflicting_actions(flow_act->action, fte->action)) {
+ if (check_conflicting_actions(flow_act->action, fte->action.action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
}
- if (fte->flow_tag != flow_act->flow_tag) {
+ if (flow_act->has_flow_tag &&
+ fte->action.flow_tag != flow_act->flow_tag) {
mlx5_core_warn(get_dev(&fte->node),
"FTE flow tag %u already exists with different flow tag %u\n",
- fte->flow_tag,
+ fte->action.flow_tag,
flow_act->flow_tag);
return -EEXIST;
}
@@ -1471,12 +1481,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
if (ret)
return ERR_PTR(ret);
- old_action = fte->action;
- fte->action |= flow_act->action;
+ old_action = fte->action.action;
+ fte->action.action |= flow_act->action;
handle = add_rule_fte(fte, fg, dest, dest_num,
old_action != flow_act->action);
if (IS_ERR(handle)) {
- fte->action = old_action;
+ fte->action.action = old_action;
return handle;
}
trace_mlx5_fs_set_fte(fte, false);
@@ -1637,7 +1647,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
list_for_each_entry(iter, match_head, list) {
nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
- ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL);
}
search_again_locked:
@@ -1919,7 +1928,6 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
return 0;
new_root_ft = find_next_ft(ft);
-
if (!new_root_ft) {
root->root_ft = NULL;
return 0;
@@ -1928,13 +1936,14 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0;
- err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, qpn,
- false);
+ err = root->cmds->update_root_ft(root->dev, new_root_ft,
+ qpn, false);
} else {
list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn;
- err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
- qpn, false);
+ err = root->cmds->update_root_ft(root->dev,
+ new_root_ft, qpn,
+ false);
if (err)
break;
}
@@ -2046,6 +2055,11 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &steering->sniffer_tx_root_ns->ns;
else
return NULL;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ if (steering->egress_root_ns)
+ return &steering->egress_root_ns->ns;
+ else
+ return NULL;
default:
return NULL;
}
@@ -2236,13 +2250,18 @@ static int init_root_tree(struct mlx5_flow_steering *steering,
return 0;
}
-static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
- enum fs_flow_table_type
- table_type)
+static struct mlx5_flow_root_namespace
+*create_root_ns(struct mlx5_flow_steering *steering,
+ enum fs_flow_table_type table_type)
{
+ const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
+ if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
+ (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
+ cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
+
/* Create the root namespace */
root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
@@ -2250,6 +2269,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering
root_ns->dev = steering->dev;
root_ns->table_type = table_type;
+ root_ns->cmds = cmds;
INIT_LIST_HEAD(&root_ns->underlay_qpns);
@@ -2408,6 +2428,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->fdb_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
+ cleanup_root_ns(steering->egress_root_ns);
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
kmem_cache_destroy(steering->fgs_cache);
@@ -2553,6 +2574,20 @@ cleanup_root_ns:
return err;
}
+static int init_egress_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->egress_root_ns = create_root_ns(steering,
+ FS_FT_NIC_TX);
+ if (!steering->egress_root_ns)
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
@@ -2618,6 +2653,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_IPSEC_DEV(dev)) {
+ err = init_egress_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
return 0;
err:
mlx5_cleanup_fs(dev);
@@ -2641,7 +2682,8 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto update_ft_fail;
}
- err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, false);
+ err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
+ false);
if (err) {
mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
underlay_qpn, err);
@@ -2684,7 +2726,8 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto out;
}
- err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, true);
+ err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
+ true);
if (err)
mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
underlay_qpn, err);
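
On the consumer side the egress namespace is reached through the existing lookup API, and per the switch added to mlx5_get_flow_namespace() the caller sees NULL, not an error pointer, on devices without it. A short usage sketch, where mdev is a placeholder device pointer:

    struct mlx5_flow_namespace *ns;

    ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS);
    if (!ns)        /* no egress root: device lacks IPSec support */
            return -EOPNOTSUPP;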
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 05262708f14b..e26d3e9d5f9f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -48,6 +48,7 @@ enum fs_node_type {
enum fs_flow_table_type {
FS_FT_NIC_RX = 0x0,
+ FS_FT_NIC_TX = 0x1,
FS_FT_ESW_EGRESS_ACL = 0x2,
FS_FT_ESW_INGRESS_ACL = 0x3,
FS_FT_FDB = 0X4,
@@ -75,6 +76,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
+ struct mlx5_flow_root_namespace *egress_root_ns;
};
struct fs_node {
@@ -174,11 +176,8 @@ struct fs_fte {
struct fs_node node;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
- u32 flow_tag;
u32 index;
- u32 action;
- u32 encap_id;
- u32 modify_id;
+ struct mlx5_flow_act action;
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
@@ -224,6 +223,7 @@ struct mlx5_flow_root_namespace {
/* Should be held when chaining flow tables */
struct mutex chain_lock;
struct list_head underlay_qpns;
+ const struct mlx5_flow_cmds *cmds;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
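
The fs_fte change is the data-structure half of the fte->action.* conversions above: instead of mirroring individual mlx5_flow_act fields, alloc_fte() now copies the whole act, so any field later added to mlx5_flow_act is carried along for free. Condensed before/after from the fs_core.c hunk:

    /* before: four parallel scalars, copied one by one */
    fte->flow_tag  = flow_act->flow_tag;
    fte->action    = flow_act->action;
    fte->encap_id  = flow_act->encap_id;
    fte->modify_id = flow_act->modify_id;

    /* after: one aggregate copy; readers become fte->action.<field> */
    fte->action = *flow_act;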
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ae391e4b7070..13b6f66310c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -58,6 +58,7 @@
#include "eswitch.h"
#include "lib/mlx5.h"
#include "fpga/core.h"
+#include "fpga/ipsec.h"
#include "accel/ipsec.h"
#include "lib/clock.h"
@@ -942,9 +943,9 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto out;
}
- err = mlx5_init_cq_table(dev);
+ err = mlx5_cq_debugfs_init(dev);
if (err) {
- dev_err(&pdev->dev, "failed to initialize cq table\n");
+ dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
goto err_eq_cleanup;
}
@@ -1002,7 +1003,7 @@ err_tables_cleanup:
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
+ mlx5_cq_debugfs_cleanup(dev);
err_eq_cleanup:
mlx5_eq_cleanup(dev);
@@ -1023,7 +1024,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
+ mlx5_cq_debugfs_cleanup(dev);
mlx5_eq_cleanup(dev);
}
@@ -1173,6 +1174,18 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_affinity_hints;
}
+ err = mlx5_fpga_device_start(dev);
+ if (err) {
+ dev_err(&pdev->dev, "fpga device start failed %d\n", err);
+ goto err_fpga_start;
+ }
+
+ err = mlx5_accel_ipsec_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
+ goto err_ipsec_start;
+ }
+
err = mlx5_init_fs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1191,17 +1204,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_sriov;
}
- err = mlx5_fpga_device_start(dev);
- if (err) {
- dev_err(&pdev->dev, "fpga device start failed %d\n", err);
- goto err_fpga_start;
- }
- err = mlx5_accel_ipsec_init(dev);
- if (err) {
- dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
- goto err_ipsec_start;
- }
-
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@@ -1219,17 +1221,18 @@ out:
return 0;
err_reg_dev:
- mlx5_accel_ipsec_cleanup(dev);
-err_ipsec_start:
- mlx5_fpga_device_stop(dev);
-
-err_fpga_start:
mlx5_sriov_detach(dev);
err_sriov:
mlx5_cleanup_fs(dev);
err_fs:
+ mlx5_accel_ipsec_cleanup(dev);
+
+err_ipsec_start:
+ mlx5_fpga_device_stop(dev);
+
+err_fpga_start:
mlx5_irq_clear_affinity_hints(dev);
err_affinity_hints:
@@ -1296,11 +1299,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
- mlx5_accel_ipsec_cleanup(dev);
- mlx5_fpga_device_stop(dev);
-
mlx5_sriov_detach(dev);
mlx5_cleanup_fs(dev);
+ mlx5_accel_ipsec_cleanup(dev);
+ mlx5_fpga_device_stop(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
@@ -1657,6 +1659,7 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
+ mlx5_fpga_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
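
The probe-order move is not cosmetic: create_root_ns() now consults mlx5_accel_ipsec_device_caps(), so the FPGA and IPSec accelerators must be running before mlx5_init_fs(), and mlx5_unload_one() mirrors the new order in reverse. Condensed from the hunks above (unrelated steps elided), the goto ladder unwinds exactly the stages already brought up:

    err = mlx5_fpga_device_start(dev);
    if (err)
            goto err_fpga_start;

    err = mlx5_accel_ipsec_init(dev);
    if (err)
            goto err_ipsec_start;

    err = mlx5_init_fs(dev);
    if (err)
            goto err_fs;

    /* ... later failures unwind through err_reg_dev and below ... */

    err_fs:
            mlx5_accel_ipsec_cleanup(dev);
    err_ipsec_start:
            mlx5_fpga_device_stop(dev);
    err_fpga_start:
            /* earlier-stage cleanup continues from here */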
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 394552f36fcf..4e25f2b2e0bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -38,16 +38,11 @@
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>
+#include <linux/mlx5/cq.h>
#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"
-#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
-#define MLX5_VPORT_MANAGER(mdev) \
- (MLX5_CAP_GEN(mdev, vport_group_manager) && \
- (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
- mlx5_core_is_pf(mdev))
-
extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
@@ -115,9 +110,29 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
+
+int mlx5_eq_init(struct mlx5_core_dev *dev);
+void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
+int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
+ int nent, u64 mask, const char *name,
+ enum mlx5_eq_type type);
+int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ u32 *out, int outlen);
+int mlx5_start_eqs(struct mlx5_core_dev *dev);
+void mlx5_stop_eqs(struct mlx5_core_dev *dev);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
void mlx5_cq_tasklet_cb(unsigned long data);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
+int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group);
@@ -186,4 +201,5 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
int mlx5_lag_allow(struct mlx5_core_dev *dev);
int mlx5_lag_forbid(struct mlx5_core_dev *dev);
+void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 6bcfc25350f5..ea66448ba365 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -41,7 +41,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
- return wq->sz_m1 + 1;
+ return wq->fbc.sz_m1 + 1;
}
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
@@ -62,7 +62,7 @@ static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
{
- return mlx5_cqwq_get_size(wq) << wq->log_stride;
+ return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
}
static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
@@ -92,7 +92,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->buf = wq_ctrl->buf.direct.buf;
+ wq->buf = wq_ctrl->buf.frags->buf;
wq->db = wq_ctrl->db.db;
wq_ctrl->mdev = mdev;
@@ -130,7 +130,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->rq.buf = wq_ctrl->buf.direct.buf;
+ wq->rq.buf = wq_ctrl->buf.frags->buf;
wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -151,11 +151,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
{
int err;
- wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
- wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
- wq->sz_m1 = (1 << wq->log_sz) - 1;
- wq->log_frag_strides = PAGE_SHIFT - wq->log_stride;
- wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1;
+ mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -172,7 +168,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->frag_buf = wq_ctrl->frag_buf;
+ wq->fbc.frag_buf = wq_ctrl->frag_buf;
wq->db = wq_ctrl->db.db;
wq_ctrl->mdev = mdev;
@@ -209,7 +205,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->buf = wq_ctrl->buf.direct.buf;
+ wq->buf = wq_ctrl->buf.frags->buf;
wq->db = wq_ctrl->db.db;
for (i = 0; i < wq->sz_m1; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 718589d0cec2..fca90b94596d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -45,7 +45,7 @@ struct mlx5_wq_param {
struct mlx5_wq_ctrl {
struct mlx5_core_dev *mdev;
- struct mlx5_buf buf;
+ struct mlx5_frag_buf buf;
struct mlx5_db db;
};
@@ -68,14 +68,9 @@ struct mlx5_wq_qp {
};
struct mlx5_cqwq {
- struct mlx5_frag_buf frag_buf;
- __be32 *db;
- u32 sz_m1;
- u32 frag_sz_m1;
- u32 cc; /* consumer counter */
- u8 log_sz;
- u8 log_stride;
- u8 log_frag_strides;
+ struct mlx5_frag_buf_ctrl fbc;
+ __be32 *db;
+ u32 cc; /* consumer counter */
};
struct mlx5_wq_ll {
@@ -131,20 +126,17 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
{
- return wq->cc & wq->sz_m1;
+ return wq->cc & wq->fbc.sz_m1;
}
static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
- unsigned int frag = (ix >> wq->log_frag_strides);
-
- return wq->frag_buf.frags[frag].buf +
- ((wq->frag_sz_m1 & ix) << wq->log_stride);
+ return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
{
- return wq->cc >> wq->log_sz;
+ return wq->cc >> wq->fbc.log_sz;
}
static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
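
All five deleted mlx5_cqwq fields derive from two quantities, the CQE stride and the number of strides per page-sized fragment, and the replacement helper performs the same two-step lookup the removed mlx5_cqwq_get_wqe() body spelled out. A self-contained sketch of that index math; fbc_example mirrors, but is not, struct mlx5_frag_buf_ctrl:

    struct fbc_example {
            void **frag_bufs;       /* one buffer per PAGE_SIZE fragment */
            u8 log_stride;          /* log2(stride size in bytes) */
            u8 log_frag_strides;    /* PAGE_SHIFT - log_stride */
            u32 frag_sz_m1;         /* (1 << log_frag_strides) - 1 */
    };

    static void *get_wqe_example(struct fbc_example *fbc, u32 ix)
    {
            /* which fragment the entry lives in */
            unsigned int frag = ix >> fbc->log_frag_strides;

            /* plus the byte offset of the entry inside that fragment */
            return (char *)fbc->frag_bufs[frag] +
                   ((ix & fbc->frag_sz_m1) << fbc->log_stride);
    }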
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index d56eea310509..f4d9c9975ac3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -76,6 +76,8 @@ config MLXSW_SPECTRUM
depends on PSAMPLE || PSAMPLE=n
depends on BRIDGE || BRIDGE=n
depends on IPV6 || IPV6=n
+ depends on NET_IPGRE || NET_IPGRE=n
+ depends on IPV6_GRE || IPV6_GRE=n
select PARMAN
select MLXFW
default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 9463c3fa254f..0cadcabfe86f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -20,7 +20,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_cnt.o spectrum_fid.o \
spectrum_ipip.o spectrum_acl_flex_actions.o \
spectrum_mr.o spectrum_mr_tcam.o \
- spectrum_qdisc.o
+ spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 996dc099cd58..3c0d882ba183 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -849,7 +849,6 @@ struct mlxsw_afa_mirror {
struct mlxsw_afa_resource resource;
int span_id;
u8 local_in_port;
- u8 local_out_port;
bool ingress;
};
@@ -859,7 +858,7 @@ mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
{
block->afa->ops->mirror_del(block->afa->ops_priv,
mirror->local_in_port,
- mirror->local_out_port,
+ mirror->span_id,
mirror->ingress);
kfree(mirror);
}
@@ -875,9 +874,8 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_mirror *
-mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
- u8 local_in_port, u8 local_out_port,
- bool ingress)
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
+ const struct net_device *out_dev, bool ingress)
{
struct mlxsw_afa_mirror *mirror;
int err;
@@ -887,13 +885,12 @@ mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
return ERR_PTR(-ENOMEM);
err = block->afa->ops->mirror_add(block->afa->ops_priv,
- local_in_port, local_out_port,
+ local_in_port, out_dev,
ingress, &mirror->span_id);
if (err)
goto err_mirror_add;
mirror->ingress = ingress;
- mirror->local_out_port = local_out_port;
mirror->local_in_port = local_in_port;
mirror->resource.destructor = mlxsw_afa_mirror_destructor;
mlxsw_afa_resource_add(block, &mirror->resource);
@@ -920,13 +917,13 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
}
int
-mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
- u8 local_in_port, u8 local_out_port, bool ingress)
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
+ const struct net_device *out_dev, bool ingress)
{
struct mlxsw_afa_mirror *mirror;
int err;
- mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
+ mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
ingress);
if (IS_ERR(mirror))
return PTR_ERR(mirror);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index b91f2b0829b0..3a155d104384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -36,6 +36,7 @@
#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
#include <linux/types.h>
+#include <linux/netdevice.h>
struct mlxsw_afa;
struct mlxsw_afa_block;
@@ -48,9 +49,10 @@ struct mlxsw_afa_ops {
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
void (*counter_index_put)(void *priv, unsigned int counter_index);
- int (*mirror_add)(void *priv, u8 locol_in_port, u8 local_out_port,
+ int (*mirror_add)(void *priv, u8 local_in_port,
+ const struct net_device *out_dev,
bool ingress, int *p_span_id);
- void (*mirror_del)(void *priv, u8 locol_in_port, u8 local_out_port,
+ void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
bool ingress);
};
@@ -71,7 +73,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
- u8 local_in_port, u8 local_out_port,
+ u8 local_in_port,
+ const struct net_device *out_dev,
bool ingress);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
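
The reworked ops change who names the mirror destination: mirror_add now receives it as a netdevice, so the implementation can resolve it itself (a front-panel port, or later a tunnel device), and reports back a span_id that mirror_del is keyed on instead of an output port. A schematic callback pair, not the real spectrum_acl_flex_actions.c implementation:

    static int example_mirror_add(void *priv, u8 local_in_port,
                                  const struct net_device *out_dev,
                                  bool ingress, int *p_span_id)
    {
            /* resolve out_dev to a SPAN agent, bind local_in_port to
             * it, and return the agent's id to the action block
             */
            *p_span_id = 0;         /* placeholder id */
            return 0;
    }

    static void example_mirror_del(void *priv, u8 local_in_port,
                                   int span_id, bool ingress)
    {
            /* unbind local_in_port and drop the agent ref by span_id */
    }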
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 85faa87bf42d..e30c6ce3dcb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1519,8 +1519,7 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
u8 *p_status)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
- dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+ dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
bool evreq = mlxsw_pci->cmd.nopoll;
unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
@@ -1532,11 +1531,15 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
if (err)
return err;
- if (in_mbox)
+ if (in_mbox) {
memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+ in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+ }
mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
+ if (out_mbox)
+ out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0e08be41c8e0..e002398364c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,11 +1,11 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/reg.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
* Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -2872,6 +2872,14 @@ static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
MLXSW_REG_DEFINE(ptys, MLXSW_REG_PTYS_ID, MLXSW_REG_PTYS_LEN);
+/* reg_ptys_an_disable_admin
+ * Auto negotiation disable administrative configuration
+ * 0 - Device doesn't support AN disable.
+ * 1 - Device supports AN disable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1);
+
/* reg_ptys_local_port
* Local port number.
* Access: Index
@@ -3000,12 +3008,13 @@ MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16);
MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
- u32 proto_admin)
+ u32 proto_admin, bool autoneg)
{
MLXSW_REG_ZERO(ptys, payload);
mlxsw_reg_ptys_local_port_set(payload, local_port);
mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+ mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
}
static inline void mlxsw_reg_ptys_eth_unpack(char *payload,
@@ -6772,8 +6781,104 @@ MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
*/
MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
+enum mlxsw_reg_mpat_span_type {
+ /* Local SPAN Ethernet.
+ * The original packet is not encapsulated.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0,
+
+ /* Encapsulated Remote SPAN Ethernet L3 GRE.
+ * The packet is encapsulated with GRE header.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3 = 0x3,
+};
+
+/* reg_mpat_span_type
+ * SPAN type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4);
+
+/* Remote SPAN - Ethernet VLAN
+ * - - - - - - - - - - - - - -
+ */
+
+/* reg_mpat_eth_rspan_vid
+ * Encapsulation header VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_vid, 0x18, 0, 12);
+
+/* Encapsulated Remote SPAN - Ethernet L2
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_version {
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER = 15,
+};
+
+/* reg_mpat_eth_rspan_version
+ * RSPAN mirror header version.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_version, 0x10, 18, 4);
+
+/* reg_mpat_eth_rspan_mac
+ * Destination MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_mac, 0x12, 6);
+
+/* reg_mpat_eth_rspan_tp
+ * Tag Packet. Indicates whether the mirroring header should be VLAN tagged.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_tp, 0x18, 16, 1);
+
+/* Encapsulated Remote SPAN - Ethernet L3
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_protocol {
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6,
+};
+
+/* reg_mpat_eth_rspan_protocol
+ * SPAN encapsulation protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_protocol, 0x18, 24, 4);
+
+/* reg_mpat_eth_rspan_ttl
+ * Encapsulation header Time-to-Live/HopLimit.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_ttl, 0x1C, 4, 8);
+
+/* reg_mpat_eth_rspan_smac
+ * Source MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_smac, 0x22, 6);
+
+/* reg_mpat_eth_rspan_dip*
+ * Destination IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_dip4, 0x4C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_dip6, 0x40, 16);
+
+/* reg_mpat_eth_rspan_sip*
+ * Source IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_sip4, 0x5C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_sip6, 0x50, 16);
+
static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
- u16 system_port, bool e)
+ u16 system_port, bool e,
+ enum mlxsw_reg_mpat_span_type span_type)
{
MLXSW_REG_ZERO(mpat, payload);
mlxsw_reg_mpat_pa_id_set(payload, pa_id);
@@ -6781,6 +6886,49 @@ static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
mlxsw_reg_mpat_e_set(payload, e);
mlxsw_reg_mpat_qos_set(payload, 1);
mlxsw_reg_mpat_be_set(payload, 1);
+ mlxsw_reg_mpat_span_type_set(payload, span_type);
+}
+
+static inline void mlxsw_reg_mpat_eth_rspan_pack(char *payload, u16 vid)
+{
+ mlxsw_reg_mpat_eth_rspan_vid_set(payload, vid);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l2_pack(char *payload,
+ enum mlxsw_reg_mpat_eth_rspan_version version,
+ const char *mac,
+ bool tp)
+{
+ mlxsw_reg_mpat_eth_rspan_version_set(payload, version);
+ mlxsw_reg_mpat_eth_rspan_mac_memcpy_to(payload, mac);
+ mlxsw_reg_mpat_eth_rspan_tp_set(payload, tp);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(char *payload, u8 ttl,
+ const char *smac,
+ u32 sip, u32 dip)
+{
+ mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+ mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+ mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4);
+ mlxsw_reg_mpat_eth_rspan_sip4_set(payload, sip);
+ mlxsw_reg_mpat_eth_rspan_dip4_set(payload, dip);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl,
+ const char *smac,
+ struct in6_addr sip, struct in6_addr dip)
+{
+ mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+ mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+ mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6);
+ mlxsw_reg_mpat_eth_rspan_sip6_memcpy_to(payload, (void *)&sip);
+ mlxsw_reg_mpat_eth_rspan_dip6_memcpy_to(payload, (void *)&dip);
}
/* MPAR - Monitoring Port Analyzer Register
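
With the new span_type argument and the eth_rspan_*() helpers, programming an encapsulated remote SPAN agent becomes a layered pack over a single MPAT payload: base fields first, then the L2 and L3 encapsulation parts. A hedged usage sketch for IPv4 GRE mirroring, where pa_id, system_port, dmac, smac, sip and dip are placeholders:

    char mpat_pl[MLXSW_REG_MPAT_LEN];
    int err;

    mlxsw_reg_mpat_pack(mpat_pl, pa_id, system_port, true,
                        MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
    mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
                                     MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
                                     dmac, false /* untagged */);
    mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl, 64 /* ttl */,
                                          smac, sip, dip);
    err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);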
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index bf400c75fcc8..7885fc475f7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum.c
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
@@ -71,11 +71,12 @@
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
+#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1530
-#define MLXSW_FWREV_SUBMINOR 152
+#define MLXSW_FWREV_MINOR 1620
+#define MLXSW_FWREV_SUBMINOR 192
#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP_FW_FILENAME \
@@ -487,347 +488,6 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
- return -EIO;
-
- mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_SPAN);
- mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
- sizeof(struct mlxsw_sp_span_entry),
- GFP_KERNEL);
- if (!mlxsw_sp->span.entries)
- return -ENOMEM;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++)
- INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
-
- return 0;
-}
-
-static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
- WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
- }
- kfree(mlxsw_sp->span.entries);
-}
-
-static struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- struct mlxsw_sp_span_entry *span_entry;
- char mpat_pl[MLXSW_REG_MPAT_LEN];
- u8 local_port = port->local_port;
- int index;
- int i;
- int err;
-
- /* find a free entry to use */
- index = -1;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- if (!mlxsw_sp->span.entries[i].used) {
- index = i;
- span_entry = &mlxsw_sp->span.entries[i];
- break;
- }
- }
- if (index < 0)
- return NULL;
-
- /* create a new port analayzer entry for local_port */
- mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
- if (err)
- return NULL;
-
- span_entry->used = true;
- span_entry->id = index;
- span_entry->ref_count = 1;
- span_entry->local_port = local_port;
- return span_entry;
-}
-
-static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_span_entry *span_entry)
-{
- u8 local_port = span_entry->local_port;
- char mpat_pl[MLXSW_REG_MPAT_LEN];
- int pa_id = span_entry->id;
-
- mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
- span_entry->used = false;
-}
-
-struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
-{
- int i;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
- if (curr->used && curr->local_port == local_port)
- return curr;
- }
- return NULL;
-}
-
-static struct mlxsw_sp_span_entry
-*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp_span_entry *span_entry;
-
- span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
- port->local_port);
- if (span_entry) {
- /* Already exists, just take a reference */
- span_entry->ref_count++;
- return span_entry;
- }
-
- return mlxsw_sp_span_entry_create(port);
-}
-
-static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_span_entry *span_entry)
-{
- WARN_ON(!span_entry->ref_count);
- if (--span_entry->ref_count == 0)
- mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
- return 0;
-}
-
-static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- struct mlxsw_sp_span_inspected_port *p;
- int i;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
- list_for_each_entry(p, &curr->bound_ports_list, list)
- if (p->local_port == port->local_port &&
- p->type == MLXSW_SP_SPAN_EGRESS)
- return true;
- }
-
- return false;
-}
-
-static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
- int mtu)
-{
- return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
-}
-
-static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int err;
-
- /* If port is egress mirrored, the shared buffer size should be
- * updated according to the mtu value
- */
- if (mlxsw_sp_span_is_egress_mirror(port)) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
-
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
- return err;
- }
- }
-
- return 0;
-}
-
-static struct mlxsw_sp_span_inspected_port *
-mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- struct mlxsw_sp_port *port,
- bool bind)
-{
- struct mlxsw_sp_span_inspected_port *p;
-
- list_for_each_entry(p, &span_entry->bound_ports_list, list)
- if (type == p->type &&
- port->local_port == p->local_port &&
- bind == p->bound)
- return p;
- return NULL;
-}
-
-static int
-mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
- int pa_id = span_entry->id;
-
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-}
-
-static int
-mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
-{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int i;
- int err;
-
- /* A given (source port, direction) can only be bound to one analyzer,
- * so if a binding is requested, check for conflicts.
- */
- if (bind)
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr =
- &mlxsw_sp->span.entries[i];
-
- if (mlxsw_sp_span_entry_bound_port_find(curr, type,
- port, bind))
- return -EEXIST;
- }
-
- /* if it is an egress SPAN, bind a shared buffer to it */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
- port->dev->mtu);
-
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
- return err;
- }
- }
-
- if (bind) {
- err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- true);
- if (err)
- goto err_port_bind;
- }
-
- inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
- if (!inspected_port) {
- err = -ENOMEM;
- goto err_inspected_port_alloc;
- }
- inspected_port->local_port = port->local_port;
- inspected_port->type = type;
- inspected_port->bound = bind;
- list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
-
- return 0;
-
-err_inspected_port_alloc:
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
-err_port_bind:
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- }
- return err;
-}
-
-static void
-mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
-{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
-
- inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
- port, bind);
- if (!inspected_port)
- return;
-
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
- /* remove the SBIB buffer if it was egress SPAN */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- }
-
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
-
- list_del(&inspected_port->list);
- kfree(inspected_port);
-}
-
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type, bool bind)
-{
- struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
- struct mlxsw_sp_span_entry *span_entry;
- int err;
-
- span_entry = mlxsw_sp_span_entry_get(to);
- if (!span_entry)
- return -ENOENT;
-
- netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
- span_entry->id);
-
- err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
- if (err)
- goto err_port_bind;
-
- return 0;
-
-err_port_bind:
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
- return err;
-}
-
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
- enum mlxsw_sp_span_type type, bool bind)
-{
- struct mlxsw_sp_span_entry *span_entry;
-
- span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
- destination_port);
- if (!span_entry) {
- netdev_err(from->dev, "no span entry found\n");
- return;
- }
-
- netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
- span_entry->id);
- mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
-}
-
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable, u32 rate)
{
@@ -1380,6 +1040,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
xstats->tail_drop[i] =
mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
}
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
+ i, ppcnt_pl);
+ if (err)
+ continue;
+
+ xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
+ xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
+ }
}
static void update_stats_cache(struct work_struct *work)
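The per-priority tx counters cached here are consumed later in spectrum_qdisc.c, where each band's statistics are built by summing the priorities mapped to it. A minimal userspace sketch of that aggregation, mirroring the mlxsw_sp_qdisc_bstats_per_priority_get() helper added by this series (the counter values below are made up):

#include <stdint.h>
#include <stdio.h>

#define IEEE_8021QAZ_MAX_TCS 8

/* Sum tx counters over all priorities selected by prio_bitmap. */
static void bstats_per_priority_get(const uint64_t pkts[IEEE_8021QAZ_MAX_TCS],
                                    const uint64_t bytes[IEEE_8021QAZ_MAX_TCS],
                                    uint8_t prio_bitmap,
                                    uint64_t *tx_packets, uint64_t *tx_bytes)
{
        int i;

        *tx_packets = 0;
        *tx_bytes = 0;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (prio_bitmap & (1u << i)) {
                        *tx_packets += pkts[i];
                        *tx_bytes += bytes[i];
                }
        }
}

int main(void)
{
        const uint64_t pkts[IEEE_8021QAZ_MAX_TCS] = {10, 20, 30, 0, 0, 0, 0, 0};
        const uint64_t bytes[IEEE_8021QAZ_MAX_TCS] = {1000, 2000, 3000, 0, 0, 0, 0, 0};
        uint64_t p, b;

        bstats_per_priority_get(pkts, bytes, 0x06, &p, &b); /* prios 1 and 2 */
        printf("packets=%llu bytes=%llu\n",
               (unsigned long long)p, (unsigned long long)b);
        return 0;
}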
@@ -1604,7 +1274,6 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
enum mlxsw_sp_span_type span_type;
- struct mlxsw_sp_port *to_port;
struct net_device *to_dev;
to_dev = tcf_mirred_dev(a);
@@ -1613,17 +1282,10 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
return -EINVAL;
}
- if (!mlxsw_sp_port_dev_check(to_dev)) {
- netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
- return -EOPNOTSUPP;
- }
- to_port = netdev_priv(to_dev);
-
- mirror->to_local_port = to_port->local_port;
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
- true);
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+ true, &mirror->span_id);
}
static void
@@ -1634,7 +1296,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
span_type = mirror->ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
+ mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
span_type, true);
}
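After this change, matchall mirror sessions are keyed by the SPAN agent id returned from mlxsw_sp_span_mirror_add() rather than by the destination's local port. The reworked API shape, as implied by the callers in this patch (a sketch only; the real declarations live in spectrum_span.h introduced by the series):

#include <stdbool.h>

struct mlxsw_sp_port;
struct net_device;

enum mlxsw_sp_span_type {
        MLXSW_SP_SPAN_EGRESS,
        MLXSW_SP_SPAN_INGRESS
};

/* The destination is now any net_device, and the caller receives a SPAN
 * agent id in *p_span_id to store and pass back on deletion.
 */
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
                             const struct net_device *to_dev,
                             enum mlxsw_sp_span_type type,
                             bool bind, int *p_span_id);
void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
                              enum mlxsw_sp_span_type type, bool bind);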
@@ -2728,7 +2390,7 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
int err;
autoneg = mlxsw_sp_port->link.autoneg;
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
@@ -2762,7 +2424,7 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
bool autoneg;
int err;
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
@@ -2780,7 +2442,7 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
}
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_new);
+ eth_proto_new, autoneg);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
@@ -2991,7 +2653,7 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_admin);
+ eth_proto_admin, mlxsw_sp_port->link.autoneg);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
@@ -4021,14 +3683,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
+ err = mlxsw_sp_span_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
+ goto err_span_init;
+ }
+
+ /* Initialize router after SPAN is initialized, so that the FIB and
+ * neighbor event handlers can issue SPAN respin.
+ */
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
goto err_router_init;
}
- /* Initialize netdevice notifier after router is initialized, so that
- * the event handler can use router structures.
+ /* Initialize netdevice notifier after router and SPAN are initialized,
+ * so that the event handler can use router structures and call SPAN
+ * respin.
*/
mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
@@ -4037,12 +3709,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_netdev_notifier;
}
- err = mlxsw_sp_span_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
- goto err_span_init;
- }
-
err = mlxsw_sp_acl_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
@@ -4068,12 +3734,12 @@ err_ports_create:
err_dpipe_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
- mlxsw_sp_span_fini(mlxsw_sp);
-err_span_init:
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_span_fini(mlxsw_sp);
+err_span_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -4099,9 +3765,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
- mlxsw_sp_span_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -4113,12 +3779,8 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
}
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
- .used_max_vepa_channels = 1,
- .max_vepa_channels = 0,
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
- .used_max_pgt = 1,
- .max_pgt = 0,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
@@ -4144,70 +3806,6 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.resource_query_enable = 1,
};
-static bool
-mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
- u64 size)
-{
- const struct mlxsw_config_profile *profile;
-
- profile = &mlxsw_sp_config_profile;
- if (size % profile->kvd_hash_granularity) {
- NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
- return false;
- }
- return true;
-}
-
-static int
-mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
- return -EINVAL;
-}
-
-static int
-mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
- return -EINVAL;
-
- return 0;
-}
-
-static int
-mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-
- if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
- return -EINVAL;
-
- if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
- NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
- return -EINVAL;
- }
- return 0;
-}
-
-static int
-mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-
- if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
- return -EINVAL;
-
- if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
- NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
- return -EINVAL;
- }
- return 0;
-}
-
static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@ -4216,23 +3814,10 @@ static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
}
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
- .size_validate = mlxsw_sp_resource_kvd_size_validate,
-};
-
static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
- .size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
};
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
- .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
-};
-
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
- .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
-};
-
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -4291,17 +3876,16 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- true, kvd_size,
- MLXSW_SP_RESOURCE_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
DEVLINK_RESOURCE_ID_PARENT_TOP,
&kvd_size_params,
- &mlxsw_sp_resource_kvd_ops);
+ NULL);
if (err)
return err;
linear_size = profile->kvd_linear_size;
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
- false, linear_size,
+ linear_size,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD,
&linear_size_params,
@@ -4309,27 +3893,31 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
return err;
+ err = mlxsw_sp_kvdl_resources_register(devlink);
+ if (err)
+ return err;
+
double_size = kvd_size - linear_size;
double_size *= profile->kvd_hash_double_parts;
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
double_size = rounddown(double_size, profile->kvd_hash_granularity);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
- false, double_size,
+ double_size,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
MLXSW_SP_RESOURCE_KVD,
&hash_double_size_params,
- &mlxsw_sp_resource_kvd_hash_double_ops);
+ NULL);
if (err)
return err;
single_size = kvd_size - double_size - linear_size;
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
- false, single_size,
+ single_size,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD,
&hash_single_size_params,
- &mlxsw_sp_resource_kvd_hash_single_ops);
+ NULL);
if (err)
return err;
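The hash_double and hash_single sizes registered above are derived from whatever remains of KVD after the linear part, split according to the profile's parts ratio and rounded down to the hash granularity. A standalone model of that arithmetic (the profile numbers below are illustrative, not the Spectrum defaults):

#include <stdint.h>
#include <stdio.h>

static uint64_t rounddown_u64(uint64_t x, uint64_t gran)
{
        return x - (x % gran);
}

int main(void)
{
        uint64_t kvd_size = 400000, linear_size = 100000;
        uint64_t double_parts = 2, single_parts = 1, granularity = 128;
        uint64_t double_size, single_size;

        double_size = kvd_size - linear_size;
        double_size *= double_parts;
        double_size /= double_parts + single_parts;
        double_size = rounddown_u64(double_size, granularity);
        single_size = kvd_size - double_size - linear_size;

        printf("double=%llu single=%llu\n",
               (unsigned long long)double_size,
               (unsigned long long)single_size);
        return 0;
}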
@@ -4583,13 +4171,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
u16 lag_id;
if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Exceeded number of supported LAG devices");
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
return false;
}
if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
- NL_SET_ERR_MSG(extack,
- "spectrum: LAG device using unsupported Tx type");
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
return false;
}
return true;
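The conversions in this and the following hunks drop the literal "spectrum: " prefix because NL_SET_ERR_MSG_MOD() prepends the module name itself. Its definition in include/linux/netlink.h is essentially:

#define NL_SET_ERR_MSG_MOD(extack, msg) \
        NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)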
@@ -4831,8 +4417,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
!netif_is_ovs_master(upper_dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Unknown upper device type");
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
if (!info->linking)
@@ -4841,8 +4426,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Enslaving a port to a device that already has an upper device is not supported");
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
if (netif_is_lag_master(upper_dev) &&
@@ -4850,24 +4434,20 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
info->upper_info, extack))
return -EINVAL;
if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Master device is a LAG master and this device has a VLAN");
+ NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
return -EINVAL;
}
if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
!netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Can not put a VLAN on a LAG port");
+ NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
return -EINVAL;
}
if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Master device is an OVS master and this device has a VLAN");
+ NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
return -EINVAL;
}
if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Can not put a VLAN on an OVS port");
+ NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
return -EINVAL;
}
break;
@@ -4980,7 +4560,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
+ NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
return -EINVAL;
}
if (!info->linking)
@@ -4989,7 +4569,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
break;
@@ -5067,10 +4647,18 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_span_entry *span_entry;
struct mlxsw_sp *mlxsw_sp;
int err = 0;
mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
+ if (event == NETDEV_UNREGISTER) {
+ span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
+ if (span_entry)
+ mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
+ }
+ mlxsw_sp_span_respin(mlxsw_sp);
+
if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
event, ptr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 92064db2ae44..21bee8f19894 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -70,16 +70,23 @@
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES "singles"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
};
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
+struct mlxsw_sp_span_entry;
struct mlxsw_sp_upper {
struct net_device *dev;
@@ -111,35 +118,13 @@ struct mlxsw_sp_mid {
unsigned long *ports_in_mid; /* bits array */
};
-enum mlxsw_sp_span_type {
- MLXSW_SP_SPAN_EGRESS,
- MLXSW_SP_SPAN_INGRESS
-};
-
-struct mlxsw_sp_span_inspected_port {
- struct list_head list;
- enum mlxsw_sp_span_type type;
- u8 local_port;
-
- /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
- bool bound;
-};
-
-struct mlxsw_sp_span_entry {
- u8 local_port;
- bool used;
- struct list_head bound_ports_list;
- int ref_count;
- int id;
-};
-
enum mlxsw_sp_port_mall_action_type {
MLXSW_SP_PORT_MALL_MIRROR,
MLXSW_SP_PORT_MALL_SAMPLE,
};
struct mlxsw_sp_port_mall_mirror_tc_entry {
- u8 to_local_port;
+ int span_id;
bool ingress;
};
@@ -226,6 +211,8 @@ struct mlxsw_sp_port_xstats {
u64 wred_drop[TC_MAX_QUEUE];
u64 tail_drop[TC_MAX_QUEUE];
u64 backlog[TC_MAX_QUEUE];
+ u64 tx_bytes[IEEE_8021QAZ_MAX_TCS];
+ u64 tx_packets[IEEE_8021QAZ_MAX_TCS];
};
struct mlxsw_sp_port {
@@ -263,6 +250,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
struct mlxsw_sp_qdisc *root_qdisc;
+ struct mlxsw_sp_qdisc *tclass_qdiscs;
unsigned acl_rule_count;
struct mlxsw_sp_acl_block *ing_acl_block;
struct mlxsw_sp_acl_block *eg_acl_block;
@@ -400,16 +388,6 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type,
- bool bind);
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
- u8 destination_port,
- enum mlxsw_sp_span_type type,
- bool bind);
-struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -465,6 +443,7 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_kvdl_resources_register(struct devlink *devlink);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 92d90ed7207e..79b1fa27a9a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -160,6 +160,13 @@ bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
return block->disable_count;
}
+static bool
+mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ /* We hold a reference on the ruleset ourselves. */
+ return ruleset->ref_count == 2;
+}
+
static int
mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
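With device binding moved out of ruleset creation (see the hunks below), the driver needs to detect when a chain-0 ruleset gains or loses its only user. The ref_count == 2 test above encodes that: one reference is the caller's own, so exactly one other holder remains. A toy model of the convention (names hypothetical):

#include <stdbool.h>

struct ruleset {
        unsigned int ref_count;
};

/* The caller holds one reference itself, so a count of exactly two means
 * a single user besides the caller references the ruleset.
 */
static bool ruleset_is_singular(const struct ruleset *rs)
{
        return rs->ref_count == 2;
}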
@@ -341,21 +348,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_ht_insert;
- if (!chain_index) {
- /* We only need ruleset with chain index 0, the implicit one,
- * to be directly bound to device. The rest of the rulesets
- * are bound by "Goto action set".
- */
- err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
- if (err)
- goto err_ruleset_bind;
- }
-
return ruleset;
-err_ruleset_bind:
- rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
- mlxsw_sp_acl_ruleset_ht_params);
err_ht_insert:
ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
@@ -369,12 +363,8 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
- u32 chain_index = ruleset->ht_key.chain_index;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- if (!chain_index)
- mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
ops->ruleset_del(mlxsw_sp, ruleset->priv);
@@ -577,7 +567,6 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct net_device *out_dev)
{
struct mlxsw_sp_acl_block_binding *binding;
- struct mlxsw_sp_port *out_port;
struct mlxsw_sp_port *in_port;
if (!list_is_singular(&block->binding_list))
@@ -586,16 +575,10 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
binding = list_first_entry(&block->binding_list,
struct mlxsw_sp_acl_block_binding, list);
in_port = binding->mlxsw_sp_port;
- if (!mlxsw_sp_port_dev_check(out_dev))
- return -EINVAL;
-
- out_port = netdev_priv(out_dev);
- if (out_port->mlxsw_sp != mlxsw_sp)
- return -EINVAL;
return mlxsw_afa_block_append_mirror(rulei->act_block,
in_port->local_port,
- out_port->local_port,
+ out_dev,
binding->ingress);
}
@@ -700,10 +683,25 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_insert;
+ if (!ruleset->ht_key.chain_index &&
+ mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
+ /* We only need ruleset with chain index 0, the implicit
+ * one, to be directly bound to device. The rest of the
+ * rulesets are bound by "Goto action set".
+ */
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset,
+ ruleset->ht_key.block);
+ if (err)
+ goto err_ruleset_block_bind;
+ }
+
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
ruleset->ht_key.block->rule_count++;
return 0;
+err_ruleset_block_bind:
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
ops->rule_del(mlxsw_sp, rule->priv);
return err;
@@ -717,6 +715,10 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
+ if (!ruleset->ht_key.chain_index &&
+ mlxsw_sp_acl_ruleset_is_singular(ruleset))
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
+ ruleset->ht_key.block);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 6ca6894125f0..510ce48d87f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
*
@@ -35,6 +35,7 @@
#include "spectrum_acl_flex_actions.h"
#include "core_acl_flex_actions.h"
+#include "spectrum_span.h"
#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
@@ -125,40 +126,23 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
}
static int
-mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port,
+mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
+ const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
- struct mlxsw_sp_port *in_port, *out_port;
- struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp_port *in_port;
struct mlxsw_sp *mlxsw_sp = priv;
enum mlxsw_sp_span_type type;
- int err;
type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- out_port = mlxsw_sp->ports[local_out_port];
in_port = mlxsw_sp->ports[local_in_port];
- err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false);
- if (err)
- return err;
-
- span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port);
- if (!span_entry) {
- err = -ENOENT;
- goto err_span_entry_find;
- }
-
- *p_span_id = span_entry->id;
- return 0;
-
-err_span_entry_find:
- mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
- return err;
+ return mlxsw_sp_span_mirror_add(in_port, out_dev, type,
+ false, p_span_id);
}
static void
-mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
- bool ingress)
+mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
{
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *in_port;
@@ -167,7 +151,7 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
in_port = mlxsw_sp->ports[local_in_port];
- mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+ mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
}
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index c6e180c2be1e..ad1b548e3cac 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -228,10 +228,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
- if (err)
- goto err_group_update;
-
err = rhashtable_init(&group->chunk_ht,
&mlxsw_sp_acl_tcam_chunk_ht_params);
if (err)
@@ -240,7 +236,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
return 0;
err_rhashtable_init:
-err_group_update:
mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 7502e53447bd..98d896c14b87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,126 +33,125 @@
*/
#include <net/ip_tunnels.h>
+#include <net/ip6_tunnel.h>
#include "spectrum_ipip.h"
struct ip_tunnel_parm
-mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev)
+mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
{
struct ip_tunnel *tun = netdev_priv(ol_dev);
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms_has_ikey(struct ip_tunnel_parm parms)
+struct __ip6_tnl_parm
+mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
+{
+ struct ip6_tnl *tun = netdev_priv(ol_dev);
+
+ return tun->parms;
+}
+
+static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms)
{
return !!(parms.i_flags & TUNNEL_KEY);
}
-static bool mlxsw_sp_ipip_parms_has_okey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms)
{
return !!(parms.o_flags & TUNNEL_KEY);
}
-static u32 mlxsw_sp_ipip_parms_ikey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms)
{
- return mlxsw_sp_ipip_parms_has_ikey(parms) ?
+ return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
be32_to_cpu(parms.i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms_okey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms)
{
- return mlxsw_sp_ipip_parms_has_okey(parms) ?
+ return mlxsw_sp_ipip_parms4_has_okey(parms) ?
be32_to_cpu(parms.o_key) : 0;
}
-static __be32 mlxsw_sp_ipip_parms_saddr4(struct ip_tunnel_parm parms)
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms)
{
- return parms.iph.saddr;
+ return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms_saddr(enum mlxsw_sp_l3proto proto,
- struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms)
{
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_parms_saddr4(parms),
- };
- case MLXSW_SP_L3_PROTO_IPV6:
- break;
- }
-
- WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr };
}
-static __be32 mlxsw_sp_ipip_parms_daddr4(struct ip_tunnel_parm parms)
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms)
{
- return parms.iph.daddr;
+ return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms_daddr(enum mlxsw_sp_l3proto proto,
- struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms)
+{
+ return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr };
+}
+
+union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
{
+ struct ip_tunnel_parm parms4;
+ struct __ip6_tnl_parm parms6;
+
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_parms_daddr4(parms),
- };
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ return mlxsw_sp_ipip_parms4_saddr(parms4);
case MLXSW_SP_L3_PROTO_IPV6:
- break;
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ return mlxsw_sp_ipip_parms6_saddr(parms6);
}
WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
-}
-
-static bool mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_has_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+ return (union mlxsw_sp_l3addr) {0};
}
-static bool mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev)
+static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
- return mlxsw_sp_ipip_parms_has_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
-}
-static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
-}
+ struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
-static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+ return mlxsw_sp_ipip_parms4_daddr(parms4).addr4;
}
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
- return mlxsw_sp_ipip_parms_saddr(proto,
- mlxsw_sp_ipip_netdev_parms(ol_dev));
-}
+ struct ip_tunnel_parm parms4;
+ struct __ip6_tnl_parm parms6;
-static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_daddr4(mlxsw_sp_ipip_netdev_parms(ol_dev));
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ return mlxsw_sp_ipip_parms4_daddr(parms4);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ return mlxsw_sp_ipip_parms6_daddr(parms6);
+ }
+
+ WARN_ON(1);
+ return (union mlxsw_sp_l3addr) {0};
}
-static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev)
+bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
{
- return mlxsw_sp_ipip_parms_daddr(proto,
- mlxsw_sp_ipip_netdev_parms(ol_dev));
+ union mlxsw_sp_l3addr naddr = {0};
+
+ return !memcmp(&addr, &naddr, sizeof(naddr));
}
static int
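mlxsw_sp_l3addr_is_zero() compares the whole union against a zero-initialized instance, which covers both the IPv4 and IPv6 members at once. A userspace model (memset is used here for strict-C portability; the kernel's {0} initializer achieves the same under GCC):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

union l3addr {
        uint32_t addr4;        /* __be32 in the kernel */
        struct in6_addr addr6;
};

static bool l3addr_is_zero(union l3addr addr)
{
        union l3addr naddr;

        /* Zero every byte of the union before the full-width compare. */
        memset(&naddr, 0, sizeof(naddr));
        return !memcmp(&addr, &naddr, sizeof(naddr));
}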
@@ -176,12 +175,17 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
u32 tunnel_index,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
- bool has_ikey = mlxsw_sp_ipip_netdev_has_ikey(ipip_entry->ol_dev);
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
- u32 ikey = mlxsw_sp_ipip_netdev_ikey(ipip_entry->ol_dev);
char rtdp_pl[MLXSW_REG_RTDP_LEN];
+ struct ip_tunnel_parm parms;
unsigned int type_check;
+ bool has_ikey;
u32 daddr4;
+ u32 ikey;
+
+ parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
+ has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms);
+ ikey = mlxsw_sp_ipip_parms4_ikey(parms);
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
@@ -243,15 +247,14 @@ static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
{
union mlxsw_sp_l3addr saddr = mlxsw_sp_ipip_netdev_saddr(proto, ol_dev);
union mlxsw_sp_l3addr daddr = mlxsw_sp_ipip_netdev_daddr(proto, ol_dev);
- union mlxsw_sp_l3addr naddr = {0};
/* Tunnels with unset local or remote address are valid in Linux and
* used for lightweight tunnels (LWT) and Non-Broadcast Multi-Access
* (NBMA) tunnels. In principle these can be offloaded, but the driver
* currently doesn't support this. So punt.
*/
- return memcmp(&saddr, &naddr, sizeof(naddr)) &&
- memcmp(&daddr, &naddr, sizeof(naddr));
+ return !mlxsw_sp_l3addr_is_zero(saddr) &&
+ !mlxsw_sp_l3addr_is_zero(daddr);
}
static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
@@ -273,14 +276,15 @@ static struct mlxsw_sp_rif_ipip_lb_config
mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
+ struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
- lb_ipipt = mlxsw_sp_ipip_netdev_has_okey(ol_dev) ?
+ lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ?
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
return (struct mlxsw_sp_rif_ipip_lb_config){
.lb_ipipt = lb_ipipt,
- .okey = mlxsw_sp_ipip_netdev_okey(ol_dev),
+ .okey = mlxsw_sp_ipip_parms4_okey(parms),
.ul_protocol = MLXSW_SP_L3_PROTO_IPV4,
.saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
ol_dev),
@@ -300,16 +304,12 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
bool update_nhs = false;
int err = 0;
- new_parms = mlxsw_sp_ipip_netdev_parms(ipip_entry->ol_dev);
+ new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
- new_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
- new_parms);
- old_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
- ipip_entry->parms);
- new_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
- new_parms);
- old_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
- ipip_entry->parms);
+ new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms);
+ old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4);
+ new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms);
+ old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4);
if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
@@ -326,14 +326,14 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
}
update_tunnel = true;
- } else if ((mlxsw_sp_ipip_parms_okey(ipip_entry->parms) !=
- mlxsw_sp_ipip_parms_okey(new_parms)) ||
- ipip_entry->parms.link != new_parms.link) {
+ } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) !=
+ mlxsw_sp_ipip_parms4_okey(new_parms)) ||
+ ipip_entry->parms4.link != new_parms.link) {
update_tunnel = true;
} else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
update_nhs = true;
- } else if (mlxsw_sp_ipip_parms_ikey(ipip_entry->parms) !=
- mlxsw_sp_ipip_parms_ikey(new_parms)) {
+ } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) !=
+ mlxsw_sp_ipip_parms4_ikey(new_parms)) {
update_decap = true;
}
@@ -350,7 +350,7 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
false, false, false,
extack);
- ipip_entry->parms = new_parms;
+ ipip_entry->parms4 = new_parms;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 04b08d9d76e9..6909d867bb59 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,14 +37,19 @@
#include "spectrum_router.h"
#include <net/ip_fib.h>
+#include <linux/if_tunnel.h>
struct ip_tunnel_parm
-mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev);
+mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
+struct __ip6_tnl_parm
+mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev);
union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev);
+bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr);
+
enum mlxsw_sp_ipip_type {
MLXSW_SP_IPIP_TYPE_GRE4,
MLXSW_SP_IPIP_TYPE_MAX,
@@ -56,7 +61,9 @@ struct mlxsw_sp_ipip_entry {
struct mlxsw_sp_rif_ipip_lb *ol_lb;
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
- struct ip_tunnel_parm parms;
+ union {
+ struct ip_tunnel_parm parms4;
+ };
};
struct mlxsw_sp_ipip_ops {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 55f9d2d70f9e..85503e93b93f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -67,7 +67,7 @@ struct mlxsw_sp_kvdl_part_info {
struct mlxsw_sp_kvdl_part {
struct list_head list;
- const struct mlxsw_sp_kvdl_part_info *info;
+ struct mlxsw_sp_kvdl_part_info *info;
unsigned long usage[0]; /* Entries */
};
@@ -188,21 +188,27 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+enum mlxsw_sp_kvdl_part_id {
+ MLXSW_SP_KVDL_PART_SINGLE,
+ MLXSW_SP_KVDL_PART_CHUNKS,
+ MLXSW_SP_KVDL_PART_LARGE_CHUNKS,
+};
+
static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = {
{
- .part_index = 0,
+ .part_index = MLXSW_SP_KVDL_PART_SINGLE,
.start_index = MLXSW_SP_KVDL_SINGLE_BASE,
.end_index = MLXSW_SP_KVDL_SINGLE_END,
.alloc_size = 1,
},
{
- .part_index = 1,
+ .part_index = MLXSW_SP_KVDL_PART_CHUNKS,
.start_index = MLXSW_SP_KVDL_CHUNKS_BASE,
.end_index = MLXSW_SP_KVDL_CHUNKS_END,
.alloc_size = MLXSW_SP_CHUNK_MAX,
},
{
- .part_index = 2,
+ .part_index = MLXSW_SP_KVDL_PART_LARGE_CHUNKS,
.start_index = MLXSW_SP_KVDL_LARGE_CHUNKS_BASE,
.end_index = MLXSW_SP_KVDL_LARGE_CHUNKS_END,
.alloc_size = MLXSW_SP_LARGE_CHUNK_MAX,
@@ -222,27 +228,76 @@ mlxsw_sp_kvdl_part_find(struct mlxsw_sp *mlxsw_sp, unsigned int part_index)
return NULL;
}
+static void
+mlxsw_sp_kvdl_part_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_kvdl_part *part, unsigned int size)
+{
+ struct mlxsw_sp_kvdl_part_info *info = part->info;
+
+ if (list_is_last(&part->list, &mlxsw_sp->kvdl->parts_list)) {
+ info->end_index = size - 1;
+ } else {
+ struct mlxsw_sp_kvdl_part *last_part;
+
+ last_part = list_next_entry(part, list);
+ info->start_index = last_part->info->end_index + 1;
+ info->end_index = info->start_index + size - 1;
+ }
+}
+
static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
unsigned int part_index)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
const struct mlxsw_sp_kvdl_part_info *info;
+ enum mlxsw_sp_resource_id resource_id;
struct mlxsw_sp_kvdl_part *part;
+ bool need_update = true;
unsigned int nr_entries;
size_t usage_size;
+ u64 resource_size;
+ int err;
info = &kvdl_parts_info[part_index];
- nr_entries = (info->end_index - info->start_index + 1) /
- info->alloc_size;
+ switch (part_index) {
+ case MLXSW_SP_KVDL_PART_SINGLE:
+ resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE;
+ break;
+ case MLXSW_SP_KVDL_PART_CHUNKS:
+ resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS;
+ break;
+ case MLXSW_SP_KVDL_PART_LARGE_CHUNKS:
+ resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = devlink_resource_size_get(devlink, resource_id, &resource_size);
+ if (err) {
+ need_update = false;
+ resource_size = info->end_index - info->start_index + 1;
+ }
+
+ nr_entries = div_u64(resource_size, info->alloc_size);
usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
if (!part)
return -ENOMEM;
- part->info = info;
- list_add(&part->list, &mlxsw_sp->kvdl->parts_list);
+ part->info = kmemdup(info, sizeof(*part->info), GFP_KERNEL);
+ if (!part->info)
+ goto err_part_info_alloc;
+ list_add(&part->list, &mlxsw_sp->kvdl->parts_list);
+ if (need_update)
+ mlxsw_sp_kvdl_part_update(mlxsw_sp, part, resource_size);
return 0;
+
+err_part_info_alloc:
+ kfree(part);
+ return -ENOMEM;
}
static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp,
@@ -255,6 +310,7 @@ static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp,
return;
list_del(&part->list);
+ kfree(part->info);
kfree(part);
}
@@ -312,6 +368,123 @@ u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
return occ;
}
+static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_part_find(mlxsw_sp, MLXSW_SP_KVDL_PART_SINGLE);
+ if (!part)
+ return -EINVAL;
+
+ return mlxsw_sp_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_part_find(mlxsw_sp, MLXSW_SP_KVDL_PART_CHUNKS);
+ if (!part)
+ return -EINVAL;
+
+ return mlxsw_sp_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_part_find(mlxsw_sp,
+ MLXSW_SP_KVDL_PART_LARGE_CHUNKS);
+ if (!part)
+ return -EINVAL;
+
+ return mlxsw_sp_kvdl_part_occ(part);
+}
+
+static struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = {
+ .occ_get = mlxsw_sp_kvdl_single_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = {
+ .occ_get = mlxsw_sp_kvdl_chunks_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = {
+ .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvdl_single_size_params = {
+ .size_min = 0,
+ .size_granularity = 1,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvdl_chunks_size_params = {
+ .size_min = 0,
+ .size_granularity = MLXSW_SP_CHUNK_MAX,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvdl_large_chunks_size_params = {
+ .size_min = 0,
+ .size_granularity = MLXSW_SP_LARGE_CHUNK_MAX,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
+static void
+mlxsw_sp_kvdl_resource_size_params_prepare(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ u32 kvdl_max_size;
+
+ kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
+
+ mlxsw_sp_kvdl_single_size_params.size_max = kvdl_max_size;
+ mlxsw_sp_kvdl_chunks_size_params.size_max = kvdl_max_size;
+ mlxsw_sp_kvdl_large_chunks_size_params.size_max = kvdl_max_size;
+}
+
+int mlxsw_sp_kvdl_resources_register(struct devlink *devlink)
+{
+ int err;
+
+ mlxsw_sp_kvdl_resource_size_params_prepare(devlink);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &mlxsw_sp_kvdl_single_size_params,
+ &mlxsw_sp_kvdl_single_ops);
+ if (err)
+ return err;
+
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &mlxsw_sp_kvdl_chunks_size_params,
+ &mlxsw_sp_kvdl_chunks_ops);
+ if (err)
+ return err;
+
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &mlxsw_sp_kvdl_large_chunks_size_params,
+ &mlxsw_sp_kvdl_chunks_large_ops);
+ return err;
+}
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index d20b143de3b4..978a3c70653a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -126,8 +126,8 @@ mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- ivif = mr_route->mfc4->mfc_parent;
- return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
+ ivif = mr_route->mfc4->_c.mfc_parent;
+ return mr_route->mfc4->_c.mfc_un.res.ttls[ivif] != 255;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
default:
@@ -364,7 +364,7 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
mr_route->mfc4 = mfc;
mr_route->mr_table = mr_table;
for (i = 0; i < MAXVIFS; i++) {
- if (mfc->mfc_un.res.ttls[i] != 255) {
+ if (mfc->_c.mfc_un.res.ttls[i] != 255) {
err = mlxsw_sp_mr_route_evif_link(mr_route,
&mr_table->vifs[i]);
if (err)
@@ -374,7 +374,8 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
}
}
- mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);
+ mlxsw_sp_mr_route_ivif_link(mr_route,
+ &mr_table->vifs[mfc->_c.mfc_parent]);
mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
return mr_route;
@@ -418,9 +419,9 @@ static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
if (offload)
- mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
+ mr_route->mfc4->_c.mfc_flags |= MFC_OFFLOAD;
else
- mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
+ mr_route->mfc4->_c.mfc_flags &= ~MFC_OFFLOAD;
break;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
@@ -943,10 +944,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- if (mr_route->mfc4->mfc_un.res.pkt != packets)
- mr_route->mfc4->mfc_un.res.lastuse = jiffies;
- mr_route->mfc4->mfc_un.res.pkt = packets;
- mr_route->mfc4->mfc_un.res.bytes = bytes;
+ if (mr_route->mfc4->_c.mfc_un.res.pkt != packets)
+ mr_route->mfc4->_c.mfc_un.res.lastuse = jiffies;
+ mr_route->mfc4->_c.mfc_un.res.pkt = packets;
+ mr_route->mfc4->_c.mfc_un.res.bytes = bytes;
break;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 0b7670459051..91262b0573e3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -42,6 +42,8 @@
#include "reg.h"
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
+#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
+ MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
enum mlxsw_sp_qdisc_type {
MLXSW_SP_QDISC_NO_QDISC,
@@ -76,6 +78,7 @@ struct mlxsw_sp_qdisc_ops {
struct mlxsw_sp_qdisc {
u32 handle;
u8 tclass_num;
+ u8 prio_bitmap;
union {
struct red_stats red;
} xstats_base;
@@ -99,6 +102,44 @@ mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
mlxsw_sp_qdisc->handle == handle;
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
+ bool root_only)
+{
+ int tclass, child_index;
+
+ if (parent == TC_H_ROOT)
+ return mlxsw_sp_port->root_qdisc;
+
+ if (root_only || !mlxsw_sp_port->root_qdisc ||
+ !mlxsw_sp_port->root_qdisc->ops ||
+ TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
+ TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
+ return NULL;
+
+ child_index = TC_H_MIN(parent);
+ tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
+ return &mlxsw_sp_port->tclass_qdiscs[tclass];
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
+{
+ int i;
+
+ if (mlxsw_sp_port->root_qdisc->handle == handle)
+ return mlxsw_sp_port->root_qdisc;
+
+ if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
+ return NULL;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
+ return &mlxsw_sp_port->tclass_qdiscs[i];
+
+ return NULL;
+}
+
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
@@ -185,6 +226,23 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
+static void
+mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
+ u8 prio_bitmap, u64 *tx_packets,
+ u64 *tx_bytes)
+{
+ int i;
+
+ *tx_packets = 0;
+ *tx_bytes = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (prio_bitmap & BIT(i)) {
+ *tx_packets += xstats->tx_packets[i];
+ *tx_bytes += xstats->tx_bytes[i];
+ }
+ }
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
@@ -230,17 +288,16 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
- struct rtnl_link_stats64 *stats;
struct red_stats *red_base;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- stats = &mlxsw_sp_port->periodic_hw_stats.stats;
stats_base = &mlxsw_sp_qdisc->stats_base;
red_base = &mlxsw_sp_qdisc->xstats_base.red;
- stats_base->tx_packets = stats->tx_packets;
- stats_base->tx_bytes = stats->tx_bytes;
-
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+ mlxsw_sp_qdisc->prio_bitmap,
+ &stats_base->tx_packets,
+ &stats_base->tx_bytes);
red_base->prob_mark = xstats->ecn;
red_base->prob_drop = xstats->wred_drop[tclass_num];
red_base->pdrop = xstats->tail_drop[tclass_num];
@@ -255,6 +312,12 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
+ struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (root_qdisc != mlxsw_sp_qdisc)
+ root_qdisc->stats_base.backlog -=
+ mlxsw_sp_qdisc->stats_base.backlog;
+
return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
mlxsw_sp_qdisc->tclass_num);
}
@@ -319,6 +382,7 @@ mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_qdisc->stats_base.backlog);
p->qstats->backlog -= backlog;
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
}
static int
@@ -357,14 +421,16 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
- struct rtnl_link_stats64 *stats;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- stats = &mlxsw_sp_port->periodic_hw_stats.stats;
stats_base = &mlxsw_sp_qdisc->stats_base;
- tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
- tx_packets = stats->tx_packets - stats_base->tx_packets;
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+ mlxsw_sp_qdisc->prio_bitmap,
+ &tx_packets, &tx_bytes);
+ tx_bytes = tx_bytes - stats_base->tx_bytes;
+ tx_packets = tx_packets - stats_base->tx_packets;
+
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
stats_base->overlimits;
drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
@@ -406,11 +472,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- if (p->parent != TC_H_ROOT)
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
-
if (p->command == TC_RED_REPLACE)
return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
mlxsw_sp_qdisc,
@@ -441,9 +506,13 @@ mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
{
int i;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
MLXSW_SP_PORT_DEFAULT_TCLASS);
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_port->tclass_qdiscs[i]);
+ mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
+ }
return 0;
}
@@ -467,16 +536,41 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
- int tclass, i;
+ struct mlxsw_sp_qdisc *child_qdisc;
+ int tclass, i, band, backlog;
+ u8 old_priomap;
int err;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
- err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
- if (err)
- return err;
+ for (band = 0; band < p->bands; band++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ old_priomap = child_qdisc->prio_bitmap;
+ child_qdisc->prio_bitmap = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (p->priomap[i] == band) {
+ child_qdisc->prio_bitmap |= BIT(i);
+ if (BIT(i) & old_priomap)
+ continue;
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
+ i, tclass);
+ if (err)
+ return err;
+ }
+ }
+ if (old_priomap != child_qdisc->prio_bitmap &&
+ child_qdisc->ops && child_qdisc->ops->clean_stats) {
+ backlog = child_qdisc->stats_base.backlog;
+ child_qdisc->ops->clean_stats(mlxsw_sp_port,
+ child_qdisc);
+ child_qdisc->stats_base.backlog = backlog;
+ }
+ }
+ for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ child_qdisc->prio_bitmap = 0;
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
}
-
return 0;
}
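The double loop above derives, for every band, the bitmap of priorities mapped to it, and only touches hardware for priorities whose band actually changed. The bitmap derivation alone, as a standalone example (the priomap values are made up):

#include <stdint.h>
#include <stdio.h>

#define MAX_TCS 8 /* IEEE_8021QAZ_MAX_TCS */

int main(void)
{
        const uint8_t priomap[MAX_TCS] = {1, 1, 0, 0, 2, 2, 0, 0};
        uint8_t band_bitmap[MAX_TCS] = {0};
        int i;

        /* Priority i belongs to band priomap[i]; set bit i in that band. */
        for (i = 0; i < MAX_TCS; i++)
                band_bitmap[priomap[i]] |= (uint8_t)(1u << i);

        for (i = 0; i < 3; i++)
                printf("band %d: prio bitmap 0x%02x\n", i, band_bitmap[i]);
        return 0;
}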
@@ -513,6 +607,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
drops += xstats->tail_drop[i];
+ drops += xstats->wred_drop[i];
backlog += xstats->backlog[i];
}
drops = drops - stats_base->drops;
@@ -548,8 +643,10 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
stats_base->tx_bytes = stats->tx_bytes;
stats_base->drops = 0;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
stats_base->drops += xstats->tail_drop[i];
+ stats_base->drops += xstats->wred_drop[i];
+ }
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
@@ -564,15 +661,48 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
+/* Grafting is not supported in mlxsw. It will result in un-offloading of the
+ * grafted qdisc as well as the qdisc at the graft location.
+ * (However, if the qdisc is grafted to the location it already occupies, the
+ * operation is ignored completely and does not cause un-offloading.)
+ */
+static int
+mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_prio_qopt_offload_graft_params *p)
+{
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
+ struct mlxsw_sp_qdisc *old_qdisc;
+
+ /* Check if the grafted qdisc is already in its "new" location. If so,
+ * nothing needs to be done.
+ */
+ if (p->band < IEEE_8021QAZ_MAX_TCS &&
+ mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
+ return 0;
+
+ /* See if the grafted qdisc is already offloaded on any tclass. If so,
+ * unoffload it.
+ */
+ old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
+ p->child_handle);
+ if (old_qdisc)
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
+
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
+ return -EOPNOTSUPP;
+}
+
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- if (p->parent != TC_H_ROOT)
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
if (p->command == TC_PRIO_REPLACE)
return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
mlxsw_sp_qdisc,
@@ -589,6 +719,9 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_PRIO_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_PRIO_GRAFT:
+ return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->graft_params);
default:
return -EOPNOTSUPP;
}
@@ -596,17 +729,36 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
- GFP_KERNEL);
- if (!mlxsw_sp_port->root_qdisc)
- return -ENOMEM;
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ int i;
+ mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
+ if (!mlxsw_sp_qdisc)
+ goto err_root_qdisc_init;
+
+ mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
+ mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc) * IEEE_8021QAZ_MAX_TCS,
+ GFP_KERNEL);
+ if (!mlxsw_sp_qdisc)
+ goto err_tclass_qdiscs_init;
+
+ mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
+
return 0;
+
+err_tclass_qdiscs_init:
+ kfree(mlxsw_sp_port->root_qdisc);
+err_root_qdisc_init:
+ return -ENOMEM;
}
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ kfree(mlxsw_sp_port->tclass_qdiscs);
kfree(mlxsw_sp_port->root_qdisc);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index f7948e983637..921bd1075edf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1,10 +1,10 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
- * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -70,6 +70,7 @@
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
+#include "spectrum_span.h"
struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
@@ -796,7 +797,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr) {
- NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
}
fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
@@ -1024,9 +1025,11 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_ipip_type ipipt,
struct net_device *ol_dev)
{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_ipip_entry *ret = NULL;
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
if (!ipip_entry)
return ERR_PTR(-ENOMEM);
@@ -1040,7 +1043,15 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
- ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);
+
+ switch (ipip_ops->ul_proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ WARN_ON(1);
+ break;
+ }
return ipip_entry;
@@ -2320,6 +2331,8 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
read_unlock_bh(&n->lock);
rtnl_lock();
+ mlxsw_sp_span_respin(mlxsw_sp);
+
entry_connected = nud_state & NUD_VALID && !dead;
neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
if (!entry_connected && !neigh_entry)
@@ -2417,7 +2430,8 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
mlxsw_core_schedule_work(&net_work->work);
mlxsw_sp_port_dev_put(mlxsw_sp_port);
break;
- case NETEVENT_MULTIPATH_HASH_UPDATE:
+ case NETEVENT_IPV4_MPATH_HASH_UPDATE:
+ case NETEVENT_IPV6_MPATH_HASH_UPDATE:
net = ptr;
if (!net_eq(net, &init_net))
@@ -5579,6 +5593,8 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
/* Protect internal structures from changes */
rtnl_lock();
+ mlxsw_sp_span_respin(mlxsw_sp);
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_APPEND: /* fall through */
@@ -5621,6 +5637,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
int err;
rtnl_lock();
+ mlxsw_sp_span_respin(mlxsw_sp);
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_ADD:
@@ -5793,7 +5811,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
}
if (err < 0)
- NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
+ NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload");
return err;
}
@@ -6032,7 +6050,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
if (err) {
- NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
goto err_rif_index_alloc;
}
@@ -7013,13 +7031,25 @@ static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
+ bool only_l3 = !ip6_multipath_hash_policy(&init_net);
+
mlxsw_sp_mp_hash_header_set(recr2_pl,
MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
- mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
+ if (only_l3) {
+ mlxsw_sp_mp_hash_field_set(recr2_pl,
+ MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
+ } else {
+ mlxsw_sp_mp_hash_header_set(recr2_pl,
+ MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
+ mlxsw_sp_mp_hash_field_set(recr2_pl,
+ MLXSW_REG_RECR2_TCP_UDP_SPORT);
+ mlxsw_sp_mp_hash_field_set(recr2_pl,
+ MLXSW_REG_RECR2_TCP_UDP_DPORT);
+ }
}
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
new file mode 100644
index 000000000000..ae22a3daffbf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -0,0 +1,804 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Petr Machata <petrm@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+#include <net/arp.h>
+#include <net/gre.h>
+#include <net/ndisc.h>
+#include <net/ip6_tunnel.h>
+
+#include "spectrum.h"
+#include "spectrum_span.h"
+#include "spectrum_ipip.h"
+
+int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
+ return -EIO;
+
+ mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_SPAN);
+ mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
+ sizeof(struct mlxsw_sp_span_entry),
+ GFP_KERNEL);
+ if (!mlxsw_sp->span.entries)
+ return -ENOMEM;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ INIT_LIST_HEAD(&curr->bound_ports_list);
+ curr->id = i;
+ }
+
+ return 0;
+}
+
+void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
+ }
+ kfree(mlxsw_sp->span.entries);
+}
+
+static int
+mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ sparmsp->dest_port = netdev_priv(to_dev);
+ return 0;
+}
+
+static int
+mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+	/* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_reg_mpat_span_type span_type)
+{
+ struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+ .can_handle = mlxsw_sp_port_dev_check,
+ .parms = mlxsw_sp_span_entry_phys_parms,
+ .configure = mlxsw_sp_span_entry_phys_configure,
+ .deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
+};
+
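+/* Resolve the destination MAC for mirror encapsulation: look up (or
+ * create) the neighbour entry for @pkey on @l3edev, kick resolution, and
+ * copy out the hardware address if the entry is currently valid.
+ * Returns -ENOENT if the neighbour is not (yet) resolved.
+ */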
+static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *l3edev,
+ unsigned char dmac[ETH_ALEN])
+{
+ struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev);
+ int err = 0;
+
+ if (!neigh) {
+ neigh = neigh_create(tbl, pkey, l3edev);
+ if (IS_ERR(neigh))
+ return PTR_ERR(neigh);
+ }
+
+ neigh_event_send(neigh, NULL);
+
+ read_lock_bh(&neigh->lock);
+ if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
+ memcpy(dmac, neigh->ha, ETH_ALEN);
+ else
+ err = -ENOENT;
+ read_unlock_bh(&neigh->lock);
+
+ neigh_release(neigh);
+ return err;
+}
+
+static int
+mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
+{
+ sparmsp->dest_port = NULL;
+ return 0;
+}
+
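+/* Fill in SPAN parameters for a tunnel device: resolve the destination MAC
+ * through the given neighbour table, using the gateway address, or the
+ * tunnel remote when no gateway is set. If the egress device is not an
+ * mlxsw port, or the neighbour cannot be resolved, mark the mirror as
+ * unoffloadable rather than failing.
+ */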
+static __maybe_unused int
+mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
+ union mlxsw_sp_l3addr saddr,
+ union mlxsw_sp_l3addr daddr,
+ union mlxsw_sp_l3addr gw,
+ __u8 ttl,
+ struct neigh_table *tbl,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ unsigned char dmac[ETH_ALEN];
+
+ if (mlxsw_sp_l3addr_is_zero(gw))
+ gw = daddr;
+
+ if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) ||
+ mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ sparmsp->dest_port = netdev_priv(l3edev);
+ sparmsp->ttl = ttl;
+ memcpy(sparmsp->dmac, dmac, ETH_ALEN);
+ memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN);
+ sparmsp->saddr = saddr;
+ sparmsp->daddr = daddr;
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_NET_IPGRE)
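+/* Repeat the route lookup that the gretap device itself would perform for
+ * its remote address, in order to discover the egress device, the source
+ * address chosen by the FIB, and the next-hop gateway.
+ */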
+static struct net_device *
+mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
+ __be32 *saddrp, __be32 *daddrp)
+{
+ struct ip_tunnel *tun = netdev_priv(to_dev);
+ struct net_device *dev = NULL;
+ struct ip_tunnel_parm parms;
+ struct rtable *rt = NULL;
+ struct flowi4 fl4;
+
+ /* We assume "dev" stays valid after rt is put. */
+ ASSERT_RTNL();
+
+ parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
+ 0, 0, parms.link, tun->fwmark);
+
+ rt = ip_route_output_key(tun->net, &fl4);
+ if (IS_ERR(rt))
+ return NULL;
+
+ if (rt->rt_type != RTN_UNICAST)
+ goto out;
+
+ dev = rt->dst.dev;
+ *saddrp = fl4.saddr;
+ *daddrp = rt->rt_gateway;
+
+out:
+ ip_rt_put(rt);
+ return dev;
+}
+
+static int
+mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
+ union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
+ bool inherit_tos = tparm.iph.tos & 0x1;
+ bool inherit_ttl = !tparm.iph.ttl;
+ union mlxsw_sp_l3addr gw = daddr;
+ struct net_device *l3edev;
+
+ if (!(to_dev->flags & IFF_UP) ||
+ /* Reject tunnels with GRE keys, checksums, etc. */
+ tparm.i_flags || tparm.o_flags ||
+ /* Require a fixed TTL and a TOS copied from the mirrored packet. */
+ inherit_ttl || !inherit_tos ||
+ /* A destination address may not be "any". */
+ mlxsw_sp_l3addr_is_zero(daddr))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
+ return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
+ tparm.iph.ttl,
+ &arp_tbl, sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+	/* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
+ sparms.dmac, false);
+ mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
+ sparms.ttl, sparms.smac,
+ be32_to_cpu(sparms.saddr.addr4),
+ be32_to_cpu(sparms.daddr.addr4));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+}
+
+static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
+ .can_handle = is_gretap_dev,
+ .parms = mlxsw_sp_span_entry_gretap4_parms,
+ .configure = mlxsw_sp_span_entry_gretap4_configure,
+ .deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6_GRE)
+static struct net_device *
+mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
+ struct in6_addr *saddrp,
+ struct in6_addr *daddrp)
+{
+ struct ip6_tnl *t = netdev_priv(to_dev);
+ struct flowi6 fl6 = t->fl.u.ip6;
+ struct net_device *dev = NULL;
+ struct dst_entry *dst;
+ struct rt6_info *rt6;
+
+ /* We assume "dev" stays valid after dst is released. */
+ ASSERT_RTNL();
+
+ fl6.flowi6_mark = t->parms.fwmark;
+ if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
+ return NULL;
+
+ dst = ip6_route_output(t->net, NULL, &fl6);
+ if (!dst || dst->error)
+ goto out;
+
+ rt6 = container_of(dst, struct rt6_info, dst);
+
+ dev = dst->dev;
+ *saddrp = fl6.saddr;
+ *daddrp = rt6->rt6i_gateway;
+
+out:
+ dst_release(dst);
+ return dev;
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
+ bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+ union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
+ union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
+ bool inherit_ttl = !tparm.hop_limit;
+ union mlxsw_sp_l3addr gw = daddr;
+ struct net_device *l3edev;
+
+ if (!(to_dev->flags & IFF_UP) ||
+ /* Reject tunnels with GRE keys, checksums, etc. */
+ tparm.i_flags || tparm.o_flags ||
+ /* Require a fixed TTL and a TOS copied from the mirrored packet. */
+ inherit_ttl || !inherit_tos ||
+ /* A destination address may not be "any". */
+ mlxsw_sp_l3addr_is_zero(daddr))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
+ return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
+ tparm.hop_limit,
+ &nd_tbl, sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+	/* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
+ sparms.dmac, false);
+ mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
+ sparms.saddr.addr6,
+ sparms.daddr.addr6);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
+ .can_handle = is_ip6gretap_dev,
+ .parms = mlxsw_sp_span_entry_gretap6_parms,
+ .configure = mlxsw_sp_span_entry_gretap6_configure,
+ .deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
+};
+#endif
+
+static const
+struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
+ &mlxsw_sp_span_entry_ops_phys,
+#if IS_ENABLED(CONFIG_NET_IPGRE)
+ &mlxsw_sp_span_entry_ops_gretap4,
+#endif
+#if IS_ENABLED(CONFIG_IPV6_GRE)
+ &mlxsw_sp_span_entry_ops_gretap6,
+#endif
+};
+
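+/* The nop ops are installed into an invalidated SPAN entry (see
+ * mlxsw_sp_span_entry_invalidate()): the entry keeps its ID and
+ * references, but resolves as unoffloadable and configures nothing.
+ */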
+static int
+mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+}
+
+static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
+ .parms = mlxsw_sp_span_entry_nop_parms,
+ .configure = mlxsw_sp_span_entry_nop_configure,
+ .deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
+};
+
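+/* Try to offload the mirror described by @sparms. On failure, fall back
+ * to an unoffloaded mirror by recording a NULL destination port; the entry
+ * is kept so that a later respin may pick up changed parameters.
+ */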
+static void
+mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ if (sparms.dest_port) {
+ if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
+			netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance\n",
+				   sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ } else if (span_entry->ops->configure(span_entry, sparms)) {
+			netdev_err(span_entry->to_dev, "Failed to offload mirror to %s\n",
+				   sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ }
+ }
+
+ span_entry->parms = sparms;
+}
+
+static void
+mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ if (span_entry->parms.dest_port)
+ span_entry->ops->deconfigure(span_entry);
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ const struct mlxsw_sp_span_entry_ops *ops,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_span_entry *span_entry = NULL;
+ int i;
+
+ /* find a free entry to use */
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ if (!mlxsw_sp->span.entries[i].ref_count) {
+ span_entry = &mlxsw_sp->span.entries[i];
+ break;
+ }
+ }
+ if (!span_entry)
+ return NULL;
+
+ span_entry->ops = ops;
+ span_entry->ref_count = 1;
+ span_entry->to_dev = to_dev;
+ mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
+
+ return span_entry;
+}
+
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure(span_entry);
+}
+
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ if (curr->ref_count && curr->to_dev == to_dev)
+ return curr;
+ }
+ return NULL;
+}
+
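+/* Detach the entry from the hardware but keep it allocated and referenced;
+ * from here on it resolves through the nop ops until its last user
+ * releases it.
+ */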
+void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure(span_entry);
+ span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ if (curr->ref_count && curr->id == span_id)
+ return curr;
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ const struct mlxsw_sp_span_entry_ops *ops,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
+ if (span_entry) {
+ /* Already exists, just take a reference */
+ span_entry->ref_count++;
+ return span_entry;
+ }
+
+ return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
+}
+
+static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ WARN_ON(!span_entry->ref_count);
+ if (--span_entry->ref_count == 0)
+ mlxsw_sp_span_entry_destroy(span_entry);
+ return 0;
+}
+
+static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ struct mlxsw_sp_span_inspected_port *p;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ list_for_each_entry(p, &curr->bound_ports_list, list)
+ if (p->local_port == port->local_port &&
+ p->type == MLXSW_SP_SPAN_EGRESS)
+ return true;
+ }
+
+ return false;
+}
+
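+/* Size the egress-mirroring buffer relative to the port MTU. The 5/2
+ * factor plus one cell is a driver heuristic that leaves headroom for the
+ * mirrored copy of a maximally-sized packet.
+ */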
+static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
+ int mtu)
+{
+ return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
+}
+
+int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int err;
+
+	/* If the port is egress-mirrored, the shared buffer size should be
+	 * updated according to the MTU value.
+	 */
+ if (mlxsw_sp_span_is_egress_mirror(port)) {
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct mlxsw_sp_span_inspected_port *
+mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ struct mlxsw_sp_span_inspected_port *p;
+
+ list_for_each_entry(p, &span_entry->bound_ports_list, list)
+ if (port->local_port == p->local_port)
+ return p;
+ return NULL;
+}
+
+static int
+mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ int pa_id = span_entry->id;
+
+ /* bind the port to the SPAN entry */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
+
+static int
+mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int err;
+
+ /* if it is an egress SPAN, bind a shared buffer to it */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
+ port->dev->mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ if (bind) {
+ err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ true);
+ if (err)
+ goto err_port_bind;
+ }
+
+ inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
+ if (!inspected_port) {
+ err = -ENOMEM;
+ goto err_inspected_port_alloc;
+ }
+ inspected_port->local_port = port->local_port;
+ inspected_port->type = type;
+ list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
+
+ return 0;
+
+err_inspected_port_alloc:
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+err_port_bind:
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+ return err;
+}
+
+static void
+mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+
+ inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
+ if (!inspected_port)
+ return;
+
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+ /* remove the SBIB buffer if it was egress SPAN */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+
+ list_del(&inspected_port->list);
+ kfree(inspected_port);
+}
+
+static const struct mlxsw_sp_span_entry_ops *
+mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
+ if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
+ return mlxsw_sp_span_entry_types[i];
+
+ return NULL;
+}
+
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ const struct net_device *to_dev,
+ enum mlxsw_sp_span_type type, bool bind,
+ int *p_span_id)
+{
+ struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_parms sparms = {0};
+ struct mlxsw_sp_span_entry *span_entry;
+ int err;
+
+ ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
+ if (!ops) {
+		netdev_err(to_dev, "Cannot mirror to %s\n", to_dev->name);
+ return -EOPNOTSUPP;
+ }
+
+ err = ops->parms(to_dev, &sparms);
+ if (err)
+ return err;
+
+ span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
+ if (!span_entry)
+ return -ENOENT;
+
+ netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
+ span_entry->id);
+
+ err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
+ if (err)
+ goto err_port_bind;
+
+ *p_span_id = span_entry->id;
+ return 0;
+
+err_port_bind:
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+ return err;
+}
+
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
+ enum mlxsw_sp_span_type type, bool bind)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
+ if (!span_entry) {
+		netdev_err(from->dev, "No SPAN entry found\n");
+ return;
+ }
+
+	netdev_dbg(from->dev, "Removing inspected port from SPAN entry %d\n",
+ span_entry->id);
+ mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
+}
+
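+/* Re-resolve the parameters of every active SPAN entry and reconfigure
+ * those whose resolved parameters differ from what is currently
+ * programmed. Called under RTNL after routing and neighbour updates that
+ * may have moved a tunnel's underlay path.
+ */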
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+ int err;
+
+ ASSERT_RTNL();
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ struct mlxsw_sp_span_parms sparms = {0};
+
+ if (!curr->ref_count)
+ continue;
+
+ err = curr->ops->parms(curr->to_dev, &sparms);
+ if (err)
+ continue;
+
+ if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
+ mlxsw_sp_span_entry_deconfigure(curr);
+ mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
new file mode 100644
index 000000000000..4b87ec20e658
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -0,0 +1,107 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_SPAN_H
+#define _MLXSW_SPECTRUM_SPAN_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "spectrum_router.h"
+
+struct mlxsw_sp;
+struct mlxsw_sp_port;
+
+enum mlxsw_sp_span_type {
+ MLXSW_SP_SPAN_EGRESS,
+ MLXSW_SP_SPAN_INGRESS
+};
+
+struct mlxsw_sp_span_inspected_port {
+ struct list_head list;
+ enum mlxsw_sp_span_type type;
+ u8 local_port;
+
+ /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
+ bool bound;
+};
+
+struct mlxsw_sp_span_parms {
+ struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
+ unsigned int ttl;
+ unsigned char dmac[ETH_ALEN];
+ unsigned char smac[ETH_ALEN];
+ union mlxsw_sp_l3addr daddr;
+ union mlxsw_sp_l3addr saddr;
+};
+
+struct mlxsw_sp_span_entry_ops;
+
+struct mlxsw_sp_span_entry {
+ const struct net_device *to_dev;
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_parms parms;
+ struct list_head bound_ports_list;
+ int ref_count;
+ int id;
+};
+
+struct mlxsw_sp_span_entry_ops {
+ bool (*can_handle)(const struct net_device *to_dev);
+ int (*parms)(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp);
+ int (*configure)(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms);
+ void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry);
+};
+
+int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
+
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ const struct net_device *to_dev,
+ enum mlxsw_sp_span_type type,
+ bool bind, int *p_span_id);
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
+ enum mlxsw_sp_span_type type, bool bind);
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev);
+
+void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry);
+
+int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 161bcdc012f0..c11c9a635866 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1844,7 +1844,7 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
if (is_vlan_dev(bridge_port->dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge");
+ NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
return -EINVAL;
}
@@ -1907,20 +1907,16 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct net_device *dev = bridge_port->dev;
u16 vid;
- if (!is_vlan_dev(bridge_port->dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge");
- return -EINVAL;
- }
- vid = vlan_dev_vlan_id(bridge_port->dev);
-
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port");
+ NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
return -EINVAL;
}
@@ -1937,8 +1933,10 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- u16 vid = vlan_dev_vlan_id(bridge_port->dev);
+ struct net_device *dev = bridge_port->dev;
+ u16 vid;
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index f3c29bbf07e2..c87b0934a405 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -789,7 +789,7 @@ mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
u32 supported, advertising, lp_advertising;
int err;
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
if (err) {
netdev_err(dev, "Failed to get proto");
@@ -879,7 +879,7 @@ mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
mlxsw_sx_to_ptys_advert_link(advertising) :
mlxsw_sx_to_ptys_speed(speed);
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
if (err) {
netdev_err(dev, "Failed to get proto");
@@ -897,7 +897,7 @@ mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
return 0;
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
- eth_proto_new);
+ eth_proto_new, true);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
if (err) {
netdev_err(dev, "Failed to set proto admin");
@@ -1029,7 +1029,7 @@ mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
- eth_proto_admin);
+ eth_proto_admin, true);
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}