From 9fba2b9b4f1566213a4f0cec658479d8915086fa Mon Sep 17 00:00:00 2001
From: Ariel Levkovich
Date: Sun, 31 Mar 2019 19:44:43 +0300
Subject: net/mlx5: Expose SW ICM related device memory capabilities

Add SW ICM related fields to the device memory capabilities structure
and a SW ownership capability in the flow table properties.

The currently supported SW ICM types are steering and header modify,
and the change exposes the device memory capabilities for each of these
two types.

SW ICM memory can be allocated by SW and then accessed by RDMA
operations for direct management of the HW packet handling tables.

Signed-off-by: Ariel Levkovich
Reviewed-by: Eli Cohen
Reviewed-by: Mark Bloch
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
---
 include/linux/mlx5/mlx5_ifc.h | 45 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 43 insertions(+), 2 deletions(-)

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 11e498442134..d96eb0916a44 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -80,6 +80,14 @@ enum {
 	MLX5_SHARED_RESOURCE_UID = 0xffff,
 };
 
+enum {
+	MLX5_OBJ_TYPE_SW_ICM = 0x0008,
+};
+
+enum {
+	MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
+};
+
 enum {
 	MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
 	MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
@@ -357,7 +365,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8 pop_vlan_2[0x1];
 	u8 push_vlan_2[0x1];
 	u8 reformat_and_vlan_action[0x1];
-	u8 reserved_at_10[0x2];
+	u8 reserved_at_10[0x1];
+	u8 sw_owner[0x1];
 	u8 reformat_l3_tunnel_to_l2[0x1];
 	u8 reformat_l2_to_l3_tunnel[0x1];
 	u8 reformat_and_modify_action[0x1];
@@ -770,7 +779,19 @@ struct mlx5_ifc_device_mem_cap_bits {
 
 	u8 max_memic_size[0x20];
 
-	u8 reserved_at_c0[0x740];
+	u8 steering_sw_icm_start_address[0x40];
+
+	u8 reserved_at_100[0x8];
+	u8 log_header_modify_sw_icm_size[0x8];
+	u8 reserved_at_110[0x2];
+	u8 log_sw_icm_alloc_granularity[0x6];
+	u8 log_steering_sw_icm_size[0x8];
+
+	u8 reserved_at_120[0x20];
+
+	u8 header_modify_sw_icm_start_address[0x40];
+
+	u8 reserved_at_180[0x680];
 };
 
 enum {
@@ -919,6 +940,7 @@ enum {
 
 enum {
 	MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
+	MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
 };
 
 struct mlx5_ifc_cmd_hca_cap_bits {
@@ -2920,6 +2942,7 @@ enum {
 	MLX5_MKC_ACCESS_MODE_MTT = 0x1,
 	MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
 	MLX5_MKC_ACCESS_MODE_KSM = 0x3,
+	MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
 	MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
 };
@@ -9491,6 +9514,19 @@ struct mlx5_ifc_uctx_bits {
 	u8 reserved_at_20[0x160];
 };
 
+struct mlx5_ifc_sw_icm_bits {
+	u8 modify_field_select[0x40];
+
+	u8 reserved_at_40[0x18];
+	u8 log_sw_icm_size[0x8];
+
+	u8 reserved_at_60[0x20];
+
+	u8 sw_icm_start_addr[0x40];
+
+	u8 reserved_at_c0[0x140];
+};
+
 struct mlx5_ifc_create_umem_in_bits {
 	u8 opcode[0x10];
 	u8 uid[0x10];
@@ -9528,6 +9564,11 @@ struct mlx5_ifc_destroy_uctx_in_bits {
 	u8 reserved_at_60[0x20];
 };
 
+struct mlx5_ifc_create_sw_icm_in_bits {
+	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+	struct mlx5_ifc_sw_icm_bits sw_icm;
+};
+
 struct mlx5_ifc_mtrc_string_db_param_bits {
 	u8 string_db_base_address[0x20];
 
--
cgit v1.2.3-55-g7522

From 3e07047021d36674d9051e76454e8b6a3b599036 Mon Sep 17 00:00:00 2001
From: Ariel Levkovich
Date: Sun, 31 Mar 2019 19:44:48 +0300
Subject: net/mlx5: Expose TIR ICM address in command outbox

Add the TIR ICM address to the create_tir command outbox, through which
the device reports the ICM address of the newly created TIR.
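
The address is returned split across three outbox fields
(icm_address_63_40, icm_address_39_32 and icm_address_31_0, added
below). As a rough sketch of how a consumer could reassemble the 64-bit
value (illustrative only, not part of this patch; "out" stands for the
caller's create_tir outbox buffer):

	u64 icm_addr;

	/* Rebuild the 64-bit TIR ICM address from the three fields. */
	icm_addr  = MLX5_GET(create_tir_out, out, icm_address_31_0);
	icm_addr |= (u64)MLX5_GET(create_tir_out, out, icm_address_39_32) << 32;
	icm_addr |= (u64)MLX5_GET(create_tir_out, out, icm_address_63_40) << 40;
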
The TIR address can be used for direct attachment to a steering rule in
SW managed steering mode.

Signed-off-by: Ariel Levkovich
Reviewed-by: Eli Cohen
Reviewed-by: Mark Bloch
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
---
 include/linux/mlx5/mlx5_ifc.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d96eb0916a44..4b37519bd6a5 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -6897,14 +6897,14 @@ struct mlx5_ifc_create_tis_in_bits {
 
 struct mlx5_ifc_create_tir_out_bits {
 	u8 status[0x8];
-	u8 reserved_at_8[0x18];
+	u8 icm_address_63_40[0x18];
 
 	u8 syndrome[0x20];
 
-	u8 reserved_at_40[0x8];
+	u8 icm_address_39_32[0x8];
 	u8 tirn[0x18];
 
-	u8 reserved_at_60[0x20];
+	u8 icm_address_31_0[0x20];
 };
 
 struct mlx5_ifc_create_tir_in_bits {
--
cgit v1.2.3-55-g7522

From 96780e4f46b2fc0fc5ae2b95957002e2c42b11d3 Mon Sep 17 00:00:00 2001
From: Ariel Levkovich
Date: Sun, 31 Mar 2019 19:44:49 +0300
Subject: net/mlx5: Introduce new TIR creation core API

Introduce a new TIR creation core API which allows the caller to
receive the full command outbox back from the call.

This is a preparation for the next patch, which will retrieve the TIR
ICM address from the command outbox.

Signed-off-by: Ariel Levkovich
Reviewed-by: Eli Cohen
Reviewed-by: Mark Bloch
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 18 +++++++++++++-----
 include/linux/mlx5/transobj.h | 3 +++
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index c4d4b76096dc..b1068500f1df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -182,16 +182,24 @@ out:
 }
 EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
 
+int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
+			     u32 *in, int inlen,
+			     u32 *out, int outlen)
+{
+	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+	return mlx5_cmd_exec(dev, in, inlen, out, outlen);
+}
+EXPORT_SYMBOL(mlx5_core_create_tir_out);
+
 int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			 u32 *tirn)
 {
-	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
+	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
 	int err;
 
-	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	err = mlx5_core_create_tir_out(dev, in, inlen,
+				       out, sizeof(out));
 	if (!err)
 		*tirn = MLX5_GET(create_tir_out, out, tirn);
 
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index a261d5528ff7..dc6b1e7cb8c4 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -50,6 +50,9 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
 int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
 int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			 u32 *tirn);
+int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
+			     u32 *in, int inlen,
+			     u32 *out, int outlen);
 int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
 			 int inlen);
 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
--
cgit v1.2.3-55-g7522

From 316793fb2d907d4726b4977b6be26ec653827774 Mon Sep 17 00:00:00 2001
From: Eli Britstein
Date: Mon, 29 Apr 2019 18:14:01 +0000
Subject: net/mlx5: E-Switch: Introduce prio tag mode

Current ConnectX HW is unable to perform VLAN pop in the TX path and
VLAN push in the RX path. To work around that limitation, untagged
packets will be tagged with VLAN ID 0x000 (priority tag), and pop/push
actions will be replaced by VLAN re-write actions (which are supported
by the HW).

Introduce prio tag mode as a pre-step to controlling the workaround
behavior.

Signed-off-by: Eli Britstein
Reviewed-by: Oz Shlomo
Signed-off-by: Saeed Mahameed
---
 include/linux/mlx5/mlx5_ifc.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 4b37519bd6a5..eeedf3f53ed3 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -951,7 +951,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 log_max_srq_sz[0x8];
 	u8 log_max_qp_sz[0x8];
-	u8 reserved_at_90[0xb];
+	u8 reserved_at_90[0x8];
+	u8 prio_tag_required[0x1];
+	u8 reserved_at_99[0x2];
 	u8 log_max_qp[0x5];
 
 	u8 reserved_at_a0[0xb];
--
cgit v1.2.3-55-g7522

From 27b942fbbd3107d4e969ece133925cd646239ef4 Mon Sep 17 00:00:00 2001
From: Parav Pandit
Date: Mon, 29 Apr 2019 18:14:02 +0000
Subject: net/mlx5: Get rid of storing copy of device name

Currently the mlx5 core stores a copy of the PCI device name in the
mlx5_priv structure and uses the pr_warn and pr_err helpers.

Get rid of the copy of this name; instead, store the parent device
pointer, which contains the name as well as DMA specific parameters.
This also allows use of the kernel's well-defined dev_warn, dev_err and
dev_dbg device-specific print routines.

This is also a preparation patch for accessing a non-PCI parent device
in the future.

Signed-off-by: Parav Pandit
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2 +-
 .../mellanox/mlx5/core/diag/fw_tracer_tracepoint.h | 5 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5 +-
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 8 +--
 drivers/net/ethernet/mellanox/mlx5/core/health.c | 2 +-
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 15 +++--
 .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 65 ++++++++++++----------
 .../net/ethernet/mellanox/mlx5/core/pagealloc.c | 5 +-
 include/linux/mlx5/driver.h | 3 +-
 9 files changed, 57 insertions(+), 53 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 0a2ffe794a54..779fa1e9ee4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1347,7 +1347,7 @@ static void set_wqname(struct mlx5_core_dev *dev)
 	struct mlx5_cmd *cmd = &dev->cmd;
 
 	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
-		 dev->priv.name);
+		 dev_name(dev->device));
 }
 
 static void clean_debug_files(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
index 7b5901d42994..3038be575923 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
@@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw,
 	TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
 
 	TP_STRUCT__entry(
-		__string(dev_name, tracer->dev->priv.name)
+		__string(dev_name, dev_name(tracer->dev->device))
 		__field(u64, trace_timestamp)
 		__field(bool, lost)
 		__field(u8, event_id)
@@ -55,7 +55,8 @@ TRACE_EVENT(mlx5_fw,
 	),
 
 	TP_fast_assign(
-		__assign_str(dev_name, tracer->dev->priv.name);
+		__assign_str(dev_name,
dev_name(tracer->dev->device)); __entry->trace_timestamp = trace_timestamp; __entry->lost = lost; __entry->event_id = event_id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b4967a0ff8c7..a40e801cf713 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -663,7 +663,8 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, } netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n", - hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name, + hp->tirn, hp->pair->rqn[0], + dev_name(hp->pair->peer_mdev->device), hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets); hpe->hp = hp; @@ -700,7 +701,7 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, hpe = list_entry(next, struct mlx5e_hairpin_entry, flows); netdev_dbg(priv->netdev, "del hairpin: peer %s\n", - hpe->hp->pair->peer_mdev->priv.name); + dev_name(hpe->hp->pair->peer_mdev->device)); mlx5e_hairpin_destroy(hpe->hp); hash_del(&hpe->hairpin_hlist); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 3f3cd32ae60a..c1d6bb90cf04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -376,11 +376,11 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, #define MLX5_DEBUG_ESWITCH_MASK BIT(3) -#define esw_info(dev, format, ...) \ - pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) +#define esw_info(__dev, format, ...) \ + dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__) -#define esw_warn(dev, format, ...) \ - pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) +#define esw_warn(__dev, format, ...) \ + dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__) #define esw_debug(dev, format, ...) 
\ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 3b98fcdd7d0e..a2656f4008d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -380,7 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev) return -ENOMEM; strcpy(name, "mlx5_health"); - strcat(name, dev->priv.name); + strcat(name, dev_name(dev->device)); health->wq = create_singlethread_workqueue(name); kfree(name); if (!health->wq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b200a29d1420..bedf809737b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -741,7 +741,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, struct mlx5_priv *priv = &dev->priv; int err = 0; - dev->pdev = pdev; priv->pci_dev_data = id->driver_data; pci_set_drvdata(dev->pdev, dev); @@ -1242,14 +1241,11 @@ static const struct devlink_ops mlx5_devlink_ops = { #endif }; -static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char *name) +static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) { struct mlx5_priv *priv = &dev->priv; int err; - strncpy(priv->name, name, MLX5_MAX_NAME_LEN); - priv->name[MLX5_MAX_NAME_LEN - 1] = 0; - dev->profile = &profile[profile_idx]; INIT_LIST_HEAD(&priv->ctx_list); @@ -1267,9 +1263,10 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char INIT_LIST_HEAD(&priv->pgdir_list); spin_lock_init(&priv->mkey_lock); - priv->dbg_root = debugfs_create_dir(name, mlx5_debugfs_root); + priv->dbg_root = debugfs_create_dir(dev_name(dev->device), + mlx5_debugfs_root); if (!priv->dbg_root) { - pr_err("mlx5_core: %s error, Cannot create debugfs dir, aborting\n", name); + dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n"); return -ENOMEM; } @@ -1312,8 +1309,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) } dev = devlink_priv(devlink); + dev->device = &pdev->dev; + dev->pdev = pdev; - err = mlx5_mdev_init(dev, prof_sel, dev_name(&pdev->dev)); + err = mlx5_mdev_init(dev, prof_sel); if (err) goto mdev_init_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index d66f4f082ef7..0e111b70c178 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -41,6 +41,7 @@ #include #include #include +#include #define DRIVER_NAME "mlx5_core" #define DRIVER_VERSION "5.0-0" @@ -48,53 +49,57 @@ extern uint mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) \ - pr_debug("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ + dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) -#define mlx5_core_dbg_once(__dev, format, ...) \ - pr_debug_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ - __func__, __LINE__, current->pid, \ +#define mlx5_core_dbg_once(__dev, format, ...) \ + dev_dbg_once((__dev)->device, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ ##__VA_ARGS__) -#define mlx5_core_dbg_mask(__dev, mask, format, ...) 
\ -do { \ - if ((mask) & mlx5_core_debug_mask) \ - mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \ +#define mlx5_core_dbg_mask(__dev, mask, format, ...) \ +do { \ + if ((mask) & mlx5_core_debug_mask) \ + mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \ } while (0) -#define mlx5_core_err(__dev, format, ...) \ - pr_err("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ - __func__, __LINE__, current->pid, \ +#define mlx5_core_err(__dev, format, ...) \ + dev_err((__dev)->device, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ ##__VA_ARGS__) -#define mlx5_core_err_rl(__dev, format, ...) \ - pr_err_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ - __func__, __LINE__, current->pid, \ - ##__VA_ARGS__) +#define mlx5_core_err_rl(__dev, format, ...) \ + dev_err_ratelimited((__dev)->device, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) -#define mlx5_core_warn(__dev, format, ...) \ - pr_warn("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ - __func__, __LINE__, current->pid, \ - ##__VA_ARGS__) +#define mlx5_core_warn(__dev, format, ...) \ + dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) #define mlx5_core_warn_once(__dev, format, ...) \ - pr_warn_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ + dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) -#define mlx5_core_warn_rl(__dev, format, ...) \ - pr_warn_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ - __func__, __LINE__, current->pid, \ - ##__VA_ARGS__) +#define mlx5_core_warn_rl(__dev, format, ...) \ + dev_warn_ratelimited((__dev)->device, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) -#define mlx5_core_info(__dev, format, ...) \ - pr_info("%s " format, (__dev)->priv.name, ##__VA_ARGS__) +#define mlx5_core_info(__dev, format, ...) \ + dev_info((__dev)->device, format, ##__VA_ARGS__) -#define mlx5_core_info_rl(__dev, format, ...) \ - pr_info_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ - __func__, __LINE__, current->pid, \ - ##__VA_ARGS__) +#define mlx5_core_info_rl(__dev, format, ...) 
\
+	dev_info_ratelimited((__dev)->device, \
+			     "%s:%d:(pid %d): " format, \
+			     __func__, __LINE__, current->pid, \
+			     ##__VA_ARGS__)
 
 enum {
 	MLX5_CMD_DATA, /* print command payload only */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 41025387ff2c..d87d42f4f6dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -600,8 +600,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
 		return 0;
 	}
 
-	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages,
-		      dev->priv.name);
+	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
 	while (*pages) {
 		if (time_after(jiffies, end)) {
 			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
@@ -614,6 +613,6 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
 		msleep(50);
 	}
 
-	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
+	mlx5_core_dbg(dev, "All pages received\n");
 	return 0;
 }
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 6c43191c0186..582a9680b182 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -56,7 +56,6 @@ enum {
 	MLX5_BOARD_ID_LEN = 64,
-	MLX5_MAX_NAME_LEN = 16,
 };
 
 enum {
@@ -514,7 +513,6 @@ struct mlx5_rl_table {
 };
 
 struct mlx5_priv {
-	char name[MLX5_MAX_NAME_LEN];
 	struct mlx5_eq_table *eq_table;
 
 	/* pages stuff */
@@ -641,6 +639,7 @@ struct mlx5_fw_tracer;
 struct mlx5_vxlan;
 
 struct mlx5_core_dev {
+	struct device *device;
 	struct pci_dev *pdev;
 	/* sync pci state */
 	struct mutex pci_status_mutex;
--
cgit v1.2.3-55-g7522

From c42260f1954598b92d9e834f3d55160318573a5e Mon Sep 17 00:00:00 2001
From: Vu Pham
Date: Mon, 29 Apr 2019 18:14:05 +0000
Subject: net/mlx5: Separate and generalize dma device from pci device

The mlx5 Sub-Function (SF) sub-device will be introduced in subsequent
patches. It will be created as a mediated device and will belong to the
mdev bus. It is necessary to treat DMA operations on PF, VF and SF in a
uniform way; hence, reduce the dependency on the pdev pci dev struct
and work directly out of the newly introduced 'struct device' from the
previous patch.

This patch does not change any functionality.
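
For illustration, the conversion pattern applied across the driver
looks roughly like this (a before/after sketch, not a hunk from this
patch; buf, size and dma_handle are placeholder names):

	/* Before: DMA operations were tied to the PCI device. */
	buf = dma_alloc_coherent(&dev->pdev->dev, size, &dma_handle,
				 GFP_KERNEL);

	/* After: DMA operations go through the generic parent device, so
	 * the same code path can serve PF/VF (PCI) devices and future SF
	 * (mdev) devices.
	 */
	buf = dma_alloc_coherent(dev->device, size, &dma_handle,
				 GFP_KERNEL);
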
Signed-off-by: Vu Pham Reviewed-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/main.c | 10 ++++++---- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 19 ++++++++++--------- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 7 +++---- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 14 +++++++------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 15 +++++++-------- 6 files changed, 34 insertions(+), 33 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 3c76ee24a17e..44d00cc5ffe2 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -181,7 +181,7 @@ static int mlx5_netdev_event(struct notifier_block *this, ibdev->rep->vport); if (rep_ndev == ndev) roce->netdev = ndev; - } else if (ndev->dev.parent == &mdev->pdev->dev) { + } else if (ndev->dev.parent == mdev->device) { roce->netdev = ndev; } write_unlock(&roce->netdev_lock); @@ -5666,7 +5666,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev) } if (bound) { - dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n"); + dev_dbg(mpi->mdev->device, + "removing port from unaffiliated list.\n"); mlx5_ib_dbg(dev, "port %d bound\n", i + 1); list_del(&mpi->list); break; @@ -5865,7 +5866,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev); - dev->ib_dev.dev.parent = &mdev->pdev->dev; + dev->ib_dev.dev.parent = mdev->device; mutex_init(&dev->cap_mask_mutex); INIT_LIST_HEAD(&dev->qp_list); @@ -6554,7 +6555,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev) if (!bound) { list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list); - dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n"); + dev_dbg(mdev->device, + "no suitable IB device found to bind to, added to unaffiliated list.\n"); } mutex_unlock(&mlx5_ib_multiport_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 9008e17126db..549f962cd86e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -57,15 +57,16 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, int node) { struct mlx5_priv *priv = &dev->priv; + struct device *device = dev->device; int original_node; void *cpu_handle; mutex_lock(&priv->alloc_mutex); - original_node = dev_to_node(&dev->pdev->dev); - set_dev_node(&dev->pdev->dev, node); - cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle, + original_node = dev_to_node(device); + set_dev_node(device, node); + cpu_handle = dma_alloc_coherent(device, size, dma_handle, GFP_KERNEL); - set_dev_node(&dev->pdev->dev, original_node); + set_dev_node(device, original_node); mutex_unlock(&priv->alloc_mutex); return cpu_handle; } @@ -110,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) { - dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf, + dma_free_coherent(dev->device, buf->size, buf->frags->buf, buf->frags->map); kfree(buf->frags); @@ -139,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, if (!frag->buf) goto err_free_buf; if (frag->map & ((1 << buf->page_shift) - 1)) { - 
dma_free_coherent(&dev->pdev->dev, frag_sz, + dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf, buf->frags[i].map); mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n", &frag->map, buf->page_shift); @@ -152,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, err_free_buf: while (i--) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf, + dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf, buf->frags[i].map); kfree(buf->frags); err_out: @@ -168,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) for (i = 0; i < buf->npages; i++) { int frag_sz = min_t(int, size, PAGE_SIZE); - dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf, + dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf, buf->frags[i].map); size -= frag_sz; } @@ -274,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) __set_bit(db->index, db->u.pgdir->bitmap); if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) { - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, + dma_free_coherent(dev->device, PAGE_SIZE, db->u.pgdir->db_page, db->u.pgdir->db_dma); list_del(&db->u.pgdir->list); bitmap_free(db->u.pgdir->bitmap); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 779fa1e9ee4e..746c8cc95e48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1852,7 +1852,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev) static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) { - struct device *ddev = &dev->pdev->dev; + struct device *ddev = dev->device; cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, &cmd->alloc_dma, GFP_KERNEL); @@ -1883,7 +1883,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) { - struct device *ddev = &dev->pdev->dev; + struct device *ddev = dev->device; dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, cmd->alloc_dma); @@ -1908,8 +1908,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) return -EINVAL; } - cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align, - 0); + cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0); if (!cmd->pool) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b5fdbd3190d9..ae0f154d16db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1891,7 +1891,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->tstamp = &priv->tstamp; c->ix = ix; c->cpu = cpu; - c->pdev = &priv->mdev->pdev->dev; + c->pdev = priv->mdev->device; c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->num_tc = params->num_tc; @@ -2137,7 +2137,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); - param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(mdev->device); } static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, @@ -2152,7 +2152,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1)); MLX5_SET(rqc, rqc, counter_set_id, 
priv->drop_rq_q_counter); - param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(mdev->device); } static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, @@ -2164,7 +2164,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); - param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(priv->mdev->device); } static void mlx5e_build_sq_param(struct mlx5e_priv *priv, @@ -3046,8 +3046,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, struct mlx5e_cq *cq, struct mlx5e_cq_param *param) { - param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); - param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(mdev->device); + param->wq.db_numa_node = dev_to_node(mdev->device); return mlx5e_alloc_cq_common(mdev, param, cq); } @@ -4639,7 +4639,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) bool fcs_supported; bool fcs_enabled; - SET_NETDEV_DEV(netdev, &mdev->pdev->dev); + SET_NETDEV_DEV(netdev, mdev->device); netdev->netdev_ops = &mlx5e_netdev_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a66b6ed80b30..fc21403f140e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1389,7 +1389,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) struct mlx5_core_dev *mdev = priv->mdev; if (rep->vport == MLX5_VPORT_UPLINK) { - SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev); + SET_NETDEV_DEV(netdev, mdev->device); netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; /* we want a persistent mac for the uplink rep */ mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index d87d42f4f6dd..91bd258ecf1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -200,7 +200,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr) rb_erase(&fwp->rb_node, &dev->priv.page_root); if (fwp->free_count != 1) list_del(&fwp->list); - dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK, + dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(fwp->page); kfree(fwp); @@ -211,11 +211,12 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr) static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) { + struct device *device = dev->device; + int nid = dev_to_node(device); struct page *page; u64 zero_addr = 1; u64 addr; int err; - int nid = dev_to_node(&dev->pdev->dev); page = alloc_pages_node(nid, GFP_HIGHUSER, 0); if (!page) { @@ -223,9 +224,8 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) return -ENOMEM; } map: - addr = dma_map_page(&dev->pdev->dev, page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&dev->pdev->dev, addr)) { + addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(device, addr)) { mlx5_core_warn(dev, "failed dma mapping page\n"); err = -ENOMEM; goto err_mapping; @@ -240,8 +240,7 @@ map: err = insert_page(dev, addr, page, func_id); if (err) { mlx5_core_err(dev, "failed to track allocated page\n"); - dma_unmap_page(&dev->pdev->dev, addr, 
PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
+		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 	}
 
 err_mapping:
@@ -249,7 +248,7 @@ err_mapping:
 		__free_page(page);
 
 	if (zero_addr == 0)
-		dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+		dma_unmap_page(device, zero_addr, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
 
 	return err;
--
cgit v1.2.3-55-g7522

From 6cfdc7e4684287cea37dcfbfb1b545273e757355 Mon Sep 17 00:00:00 2001
From: Aya Levin
Date: Mon, 29 Apr 2019 18:14:07 +0000
Subject: IB/mlx5: Restrict 'DELAY_DROP_TIMEOUT' subtype to Ethernet interfaces

The 'DELAY_DROP_TIMEOUT' subtype (under the 'GENERAL' event) is
restricted to Ethernet interfaces. This patch doesn't change
functionality or break the current flow. In the downstream patch,
non-Ethernet interfaces (like IB) will receive the 'GENERAL' event.

Fixes: 5d3c537f9070 ("net/mlx5: Handle event of power detection in the PCIE slot")
Signed-off-by: Aya Levin
Signed-off-by: Saeed Mahameed
---
 drivers/infiniband/hw/mlx5/main.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 44d00cc5ffe2..fae6a6a1fbea 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4347,9 +4347,13 @@ static void delay_drop_handler(struct work_struct *work)
 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
 				 struct ib_event *ibev)
 {
+	u8 port = (eqe->data.port.port >> 4) & 0xf;
+
 	switch (eqe->sub_type) {
 	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
-		schedule_work(&ibdev->delay_drop.delay_drop_work);
+		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+		    IB_LINK_LAYER_ETHERNET)
+			schedule_work(&ibdev->delay_drop.delay_drop_work);
 		break;
 	default: /* do nothing */
 		return;
--
cgit v1.2.3-55-g7522

From 72c6f5243999304915e90c07bf390209fb282143 Mon Sep 17 00:00:00 2001
From: Aya Levin
Date: Mon, 29 Apr 2019 18:14:09 +0000
Subject: net/mlx5: Enable general events on all interfaces

Open events of type 'GENERAL' to all types of interfaces. Prior to this
patch, 'GENERAL' events were captured only by Ethernet interfaces;
other interface types (non-Ethernet) were excluded and couldn't receive
'GENERAL' events.

Fixes: 5d3c537f9070 ("net/mlx5: Handle event of power detection in the PCIE slot")
Signed-off-by: Aya Levin
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index bb6e5b5d9681..3b617b5e1d9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -504,8 +504,7 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
 	if (MLX5_VPORT_MANAGER(dev))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
 
-	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
-	    MLX5_CAP_GEN(dev, general_notification_event))
+	if (MLX5_CAP_GEN(dev, general_notification_event))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
 
 	if (MLX5_CAP_GEN(dev, port_module_event))
--
cgit v1.2.3-55-g7522

From ae288a487514ed0b87dd489b77eeca09e1a32fc1 Mon Sep 17 00:00:00 2001
From: Maor Gottlieb
Date: Mon, 29 Apr 2019 18:14:10 +0000
Subject: net/mlx5: Pass flow steering objects to fs_cmd

Pass the flow steering objects, instead of their attributes, to fs_cmd
in order to decrease the number of arguments; in addition, the objects
will be used to update object fields.
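
To make the reshaped interface concrete, here is the shape of one
callback before and after this patch (a sketch distilled from the
hunks below):

	/* Before: raw device plus an output attribute. */
	int (*create_flow_group)(struct mlx5_core_dev *dev,
				 struct mlx5_flow_table *ft, u32 *in,
				 unsigned int *group_id);

	/* After: the root namespace and the flow group object itself.
	 * The command layer reads the device via ns->dev and stores the
	 * resulting id in fg->id.
	 */
	int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_table *ft, u32 *in,
				 struct mlx5_flow_group *fg);
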
Pass the flow steering root namespace instead of the device so that the
fs_cmd layer has the namespace context.

Signed-off-by: Maor Gottlieb
Signed-off-by: Saeed Mahameed
---
 .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 86 ++++++++++---------
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 99 +++++++++++-----------
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 33 ++++----
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 42 ++++-----
 4 files changed, 128 insertions(+), 132 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 5a22c5874f3b..52c47d3dd5a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -989,32 +989,33 @@ static enum fs_flow_table_type egress_to_fs_ft(bool egress)
 	return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
 }
 
-static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
 					   struct mlx5_flow_table *ft,
 					   u32 *in,
-					   unsigned int *group_id,
+					   struct mlx5_flow_group *fg,
 					   bool is_egress)
 {
-	int (*create_flow_group)(struct mlx5_core_dev *dev,
+	int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
 				 struct mlx5_flow_table *ft, u32 *in,
-				 unsigned int *group_id) =
 		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
+				 struct mlx5_flow_group *fg) =
 	char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
 					   match_criteria.misc_parameters);
+	struct mlx5_core_dev *dev = ns->dev;
 	u32 saved_outer_esp_spi_mask;
 	u8 match_criteria_enable;
 	int ret;
 
 	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
-		return create_flow_group(dev, ft, in, group_id);
+		return create_flow_group(ns, ft, in, fg);
 
 	match_criteria_enable =
		MLX5_GET(create_flow_group_in, in, match_criteria_enable);
 	saved_outer_esp_spi_mask =
		MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
 	if (!match_criteria_enable || !saved_outer_esp_spi_mask)
-		return create_flow_group(dev, ft, in, group_id);
+		return create_flow_group(ns, ft, in, fg);
 
 	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
 
@@ -1023,7 +1024,7 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
 		MLX5_SET(create_flow_group_in, in, match_criteria_enable,
			 match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
 
-	ret = create_flow_group(dev, ft, in, group_id);
+	ret = create_flow_group(ns, ft, in, fg);
 
 	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
 	MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
@@ -1031,17 +1032,18 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
 	return ret;
 }
 
-static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    struct fs_fte *fte,
				    bool is_egress)
 {
-	int (*create_fte)(struct mlx5_core_dev *dev,
+	int (*create_fte)(struct mlx5_flow_root_namespace *ns,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
+	struct mlx5_core_dev *dev = ns->dev;
 	struct mlx5_fpga_device *fdev = dev->fpga;
 	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
 	struct mlx5_fpga_ipsec_rule *rule;
@@ -1053,7 +1055,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
!(fte->action.action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) - return create_fte(dev, ft, fg, fte); + return create_fte(ns, ft, fg, fte); rule = kzalloc(sizeof(*rule), GFP_KERNEL); if (!rule) @@ -1070,7 +1072,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev, WARN_ON(rule_insert(fipsec, rule)); modify_spec_mailbox(dev, fte, &mbox_mod); - ret = create_fte(dev, ft, fg, fte); + ret = create_fte(ns, ft, fg, fte); restore_spec_mailbox(fte, &mbox_mod); if (ret) { _rule_delete(fipsec, rule); @@ -1081,19 +1083,20 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev, return ret; } -static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev, +static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id, + struct mlx5_flow_group *fg, int modify_mask, struct fs_fte *fte, bool is_egress) { - int (*update_fte)(struct mlx5_core_dev *dev, + int (*update_fte)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id, + struct mlx5_flow_group *fg, int modify_mask, struct fs_fte *fte) = mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte; + struct mlx5_core_dev *dev = ns->dev; bool is_esp = fte->action.esp_id; struct mailbox_mod mbox_mod; int ret; @@ -1102,24 +1105,25 @@ static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev, !(fte->action.action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) - return update_fte(dev, ft, group_id, modify_mask, fte); + return update_fte(ns, ft, fg, modify_mask, fte); modify_spec_mailbox(dev, fte, &mbox_mod); - ret = update_fte(dev, ft, group_id, modify_mask, fte); + ret = update_fte(ns, ft, fg, modify_mask, fte); restore_spec_mailbox(fte, &mbox_mod); return ret; } -static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev, +static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte, bool is_egress) { - int (*delete_fte)(struct mlx5_core_dev *dev, + int (*delete_fte)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) = mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte; + struct mlx5_core_dev *dev = ns->dev; struct mlx5_fpga_device *fdev = dev->fpga; struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; struct mlx5_fpga_ipsec_rule *rule; @@ -1131,7 +1135,7 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev, !(fte->action.action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) - return delete_fte(dev, ft, fte); + return delete_fte(ns, ft, fte); rule = rule_search(fipsec, fte); if (!rule) @@ -1141,84 +1145,84 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev, rule_delete(fipsec, rule); modify_spec_mailbox(dev, fte, &mbox_mod); - ret = delete_fte(dev, ft, fte); + ret = delete_fte(ns, ft, fte); restore_spec_mailbox(fte, &mbox_mod); return ret; } static int -mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev, +mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 *in, - unsigned int *group_id) + struct mlx5_flow_group *fg) { - return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true); + return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true); } static int -mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev, +mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct 
mlx5_flow_group *fg, struct fs_fte *fte) { - return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true); + return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true); } static int -mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev, +mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id, + struct mlx5_flow_group *fg, int modify_mask, struct fs_fte *fte) { - return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte, + return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte, true); } static int -mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev, +mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) { - return fpga_ipsec_fs_delete_fte(dev, ft, fte, true); + return fpga_ipsec_fs_delete_fte(ns, ft, fte, true); } static int -mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev, +mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 *in, - unsigned int *group_id) + struct mlx5_flow_group *fg) { - return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false); + return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false); } static int -mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev, +mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *fg, struct fs_fte *fte) { - return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false); + return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false); } static int -mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev, +mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id, + struct mlx5_flow_group *fg, int modify_mask, struct fs_fte *fte) { - return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte, + return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte, false); } static int -mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev, +mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) { - return fpga_ipsec_fs_delete_fte(dev, ft, fte, false); + return fpga_ipsec_fs_delete_fte(ns, ft, fte, false); } static struct mlx5_flow_cmds fpga_ipsec_ingress; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index c44ccb67c4a3..e89555da485e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -39,7 +39,7 @@ #include "mlx5_core.h" #include "eswitch.h" -static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev, +static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, bool disconnect) @@ -47,47 +47,43 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev, return 0; } -static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev, - u16 vport, - enum fs_flow_table_op_mod op_mod, - enum fs_flow_table_type type, - unsigned int level, +static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, unsigned int log_size, - struct mlx5_flow_table *next_ft, - unsigned int *table_id, u32 flags) + struct mlx5_flow_table *next_ft) { return 0; } -static int mlx5_cmd_stub_destroy_flow_table(struct 
mlx5_core_dev *dev, +static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft) { return 0; } -static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev, +static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { return 0; } -static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev, +static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 *in, - unsigned int *group_id) + struct mlx5_flow_group *fg) { return 0; } -static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev, +static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id) + struct mlx5_flow_group *fg) { return 0; } -static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev, +static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *group, struct fs_fte *fte) @@ -95,28 +91,29 @@ static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev, return 0; } -static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev, +static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id, + struct mlx5_flow_group *group, int modify_mask, struct fs_fte *fte) { return -EOPNOTSUPP; } -static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev, +static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) { return 0; } -static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, +static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, bool disconnect) { u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; + struct mlx5_core_dev *dev = ns->dev; if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && underlay_qpn == 0) @@ -143,29 +140,26 @@ static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, - u16 vport, - enum fs_flow_table_op_mod op_mod, - enum fs_flow_table_type type, - unsigned int level, +static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, unsigned int log_size, - struct mlx5_flow_table *next_ft, - unsigned int *table_id, u32 flags) + struct mlx5_flow_table *next_ft) { - int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT); - int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); + int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT); + int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; + struct mlx5_core_dev *dev = ns->dev; int err; MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); - MLX5_SET(create_flow_table_in, in, table_type, type); - MLX5_SET(create_flow_table_in, in, flow_table_context.level, level); + MLX5_SET(create_flow_table_in, in, table_type, ft->type); + MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level); MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size); - if (vport) { - MLX5_SET(create_flow_table_in, in, vport_number, 
vport); + if (ft->vport) { + MLX5_SET(create_flow_table_in, in, vport_number, ft->vport); MLX5_SET(create_flow_table_in, in, other_vport, 1); } @@ -174,7 +168,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en, en_encap); - switch (op_mod) { + switch (ft->op_mod) { case FS_FT_OP_MOD_NORMAL: if (next_ft) { MLX5_SET(create_flow_table_in, in, @@ -195,16 +189,17 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) - *table_id = MLX5_GET(create_flow_table_out, out, - table_id); + ft->id = MLX5_GET(create_flow_table_out, out, + table_id); return err; } -static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, +static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft) { u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0}; u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0}; + struct mlx5_core_dev *dev = ns->dev; MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); @@ -218,12 +213,13 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, +static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; + struct mlx5_core_dev *dev = ns->dev; MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); @@ -263,13 +259,14 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, +static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 *in, - unsigned int *group_id) + struct mlx5_flow_group *fg) { u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = ns->dev; int err; MLX5_SET(create_flow_group_in, in, opcode, @@ -283,23 +280,24 @@ static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) - *group_id = MLX5_GET(create_flow_group_out, out, - group_id); + fg->id = MLX5_GET(create_flow_group_out, out, + group_id); return err; } -static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, +static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id) + struct mlx5_flow_group *fg) { u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0}; u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0}; + struct mlx5_core_dev *dev = ns->dev; MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); MLX5_SET(destroy_flow_group_in, in, table_type, ft->type); MLX5_SET(destroy_flow_group_in, in, table_id, ft->id); - MLX5_SET(destroy_flow_group_in, in, group_id, group_id); + MLX5_SET(destroy_flow_group_in, in, group_id, fg->id); if (ft->vport) { MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport); MLX5_SET(destroy_flow_group_in, in, other_vport, 1); @@ -505,23 +503,25 @@ err_out: return err; } -static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, +static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns, 
struct mlx5_flow_table *ft, struct mlx5_flow_group *group, struct fs_fte *fte) { + struct mlx5_core_dev *dev = ns->dev; unsigned int group_id = group->id; return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); } -static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, +static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id, + struct mlx5_flow_group *fg, int modify_mask, struct fs_fte *fte) { int opmod; + struct mlx5_core_dev *dev = ns->dev; int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev, flow_table_properties_nic_receive. flow_modify_en); @@ -529,15 +529,16 @@ static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, return -EOPNOTSUPP; opmod = 1; - return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); + return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte); } -static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, +static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) { u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0}; u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0}; + struct mlx5_core_dev *dev = ns->dev; MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); MLX5_SET(delete_fte_in, in, table_type, ft->type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 6228ba7bfa1a..e340f9af2f5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -36,45 +36,42 @@ #include "fs_core.h" struct mlx5_flow_cmds { - int (*create_flow_table)(struct mlx5_core_dev *dev, - u16 vport, - enum fs_flow_table_op_mod op_mod, - enum fs_flow_table_type type, - unsigned int level, unsigned int log_size, - struct mlx5_flow_table *next_ft, - unsigned int *table_id, u32 flags); - int (*destroy_flow_table)(struct mlx5_core_dev *dev, + int (*create_flow_table)(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + unsigned int log_size, + struct mlx5_flow_table *next_ft); + int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft); - int (*modify_flow_table)(struct mlx5_core_dev *dev, + int (*modify_flow_table)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft); - int (*create_flow_group)(struct mlx5_core_dev *dev, + int (*create_flow_group)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 *in, - unsigned int *group_id); + struct mlx5_flow_group *fg); - int (*destroy_flow_group)(struct mlx5_core_dev *dev, + int (*destroy_flow_group)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id); + struct mlx5_flow_group *fg); - int (*create_fte)(struct mlx5_core_dev *dev, + int (*create_fte)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *fg, struct fs_fte *fte); - int (*update_fte)(struct mlx5_core_dev *dev, + int (*update_fte)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, - unsigned int group_id, + struct mlx5_flow_group *fg, int modify_mask, struct fs_fte *fte); - int (*delete_fte)(struct mlx5_core_dev *dev, + int (*delete_fte)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte); - int (*update_root_ft)(struct mlx5_core_dev *dev, + int (*update_root_ft)(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, bool disconnect); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9fcef7e3b86d..6024df821734 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -403,7 +403,7 @@ static void del_hw_flow_table(struct fs_node *node) trace_mlx5_fs_del_ft(ft); if (node->active) { - err = root->cmds->destroy_flow_table(dev, ft); + err = root->cmds->destroy_flow_table(root, ft); if (err) mlx5_core_warn(dev, "flow steering can't destroy ft\n"); } @@ -435,7 +435,7 @@ static void modify_fte(struct fs_fte *fte) dev = get_dev(&fte->node); root = find_root(&ft->node); - err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte); + err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte); if (err) mlx5_core_warn(dev, "%s can't del rule fg id=%d fte_index=%d\n", @@ -492,7 +492,7 @@ static void del_hw_fte(struct fs_node *node) dev = get_dev(&ft->node); root = find_root(&ft->node); if (node->active) { - err = root->cmds->delete_fte(dev, ft, fte); + err = root->cmds->delete_fte(root, ft, fte); if (err) mlx5_core_warn(dev, "flow steering can't delete fte in index %d of flow group id %d\n", @@ -532,7 +532,7 @@ static void del_hw_flow_group(struct fs_node *node) trace_mlx5_fs_del_fg(fg); root = find_root(&ft->node); - if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id)) + if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg)) mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", fg->id, ft->id); } @@ -783,7 +783,7 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, fs_for_each_ft(iter, prio) { i++; - err = root->cmds->modify_flow_table(dev, iter, ft); + err = root->cmds->modify_flow_table(root, iter, ft); if (err) { mlx5_core_warn(dev, "Failed to modify flow table %d\n", iter->id); @@ -831,11 +831,11 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio if (list_empty(&root->underlay_qpns)) { /* Don't set any QPN (zero) in case QPN list is empty */ qpn = 0; - err = root->cmds->update_root_ft(root->dev, ft, qpn, false); + err = root->cmds->update_root_ft(root, ft, qpn, false); } else { list_for_each_entry(uqp, &root->underlay_qpns, list) { qpn = uqp->qpn; - err = root->cmds->update_root_ft(root->dev, ft, + err = root->cmds->update_root_ft(root, ft, qpn, false); if (err) break; @@ -871,7 +871,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, memcpy(&rule->dest_attr, dest, sizeof(*dest)); root = find_root(&ft->node); - err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, + err = root->cmds->update_fte(root, ft, fg, modify_mask, fte); up_write_ref_node(&fte->node, false); @@ -1013,9 +1013,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); log_table_sz = ft->max_fte ? 
ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); - err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod, - ft->type, ft->level, log_table_sz, - next_ft, &ft->id, ft->flags); + err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft); if (err) goto free_ft; @@ -1032,7 +1030,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa trace_mlx5_fs_add_ft(ft); return ft; destroy_ft: - root->cmds->destroy_flow_table(root->dev, ft); + root->cmds->destroy_flow_table(root, ft); free_ft: kfree(ft); unlock_root: @@ -1114,7 +1112,6 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, start_flow_index); int end_index = MLX5_GET(create_flow_group_in, fg_in, end_flow_index); - struct mlx5_core_dev *dev = get_dev(&ft->node); struct mlx5_flow_group *fg; int err; @@ -1129,7 +1126,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, if (IS_ERR(fg)) return fg; - err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id); + err = root->cmds->create_flow_group(root, ft, fg_in, fg); if (err) { tree_put_node(&fg->node, false); return ERR_PTR(err); @@ -1269,11 +1266,9 @@ add_rule_fte(struct fs_fte *fte, fs_get_obj(ft, fg->node.parent); root = find_root(&fg->node); if (!(fte->status & FS_FTE_STATUS_EXISTING)) - err = root->cmds->create_fte(get_dev(&ft->node), - ft, fg, fte); + err = root->cmds->create_fte(root, ft, fg, fte); else - err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, - modify_mask, fte); + err = root->cmds->update_fte(root, ft, fg, modify_mask, fte); if (err) goto free_handle; @@ -1339,7 +1334,6 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, struct mlx5_flow_group *fg) { struct mlx5_flow_root_namespace *root = find_root(&ft->node); - struct mlx5_core_dev *dev = get_dev(&ft->node); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); void *match_criteria_addr; u8 src_esw_owner_mask_on; @@ -1369,7 +1363,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, memcpy(match_criteria_addr, fg->mask.match_criteria, sizeof(fg->mask.match_criteria)); - err = root->cmds->create_flow_group(dev, ft, in, &fg->id); + err = root->cmds->create_flow_group(root, ft, in, fg); if (!err) { fg->node.active = true; trace_mlx5_fs_add_fg(fg); @@ -1941,12 +1935,12 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) if (list_empty(&root->underlay_qpns)) { /* Don't set any QPN (zero) in case QPN list is empty */ qpn = 0; - err = root->cmds->update_root_ft(root->dev, new_root_ft, + err = root->cmds->update_root_ft(root, new_root_ft, qpn, false); } else { list_for_each_entry(uqp, &root->underlay_qpns, list) { qpn = uqp->qpn; - err = root->cmds->update_root_ft(root->dev, + err = root->cmds->update_root_ft(root, new_root_ft, qpn, false); if (err) @@ -2762,7 +2756,7 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) goto update_ft_fail; } - err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn, + err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn, false); if (err) { mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n", @@ -2806,7 +2800,7 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) goto out; } - err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn, + err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn, true); if (err) mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n", -- cgit 
v1.2.3-55-g7522 From d83eb50e29de36ddc819863ab7b9d2da58bccbd0 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Mon, 29 Apr 2019 18:14:12 +0000 Subject: net/mlx5: Add support in RDMA RX steering Add new flow steering namespace - MLX5_FLOW_NAMESPACE_RDMA_RX. Flow steering rules in this namespace are used to filter RDMA traffic. Signed-off-by: Maor Gottlieb Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 27 +++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2 ++ include/linux/mlx5/device.h | 6 +++++ include/linux/mlx5/fs.h | 1 + include/linux/mlx5/mlx5_ifc.h | 2 +- 6 files changed, 38 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index e89555da485e..629c5ab1c0d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -854,6 +854,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ case FS_FT_SNIFFER_RX: case FS_FT_SNIFFER_TX: case FS_FT_NIC_TX: + case FS_FT_RDMA_RX: return mlx5_fs_cmd_get_fw_cmds(); default: return mlx5_fs_cmd_get_stub_cmds(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6024df821734..3c2302a2b9d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2054,6 +2054,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, if (steering->sniffer_tx_root_ns) return &steering->sniffer_tx_root_ns->ns; return NULL; + case MLX5_FLOW_NAMESPACE_RDMA_RX: + if (steering->rdma_rx_root_ns) + return &steering->rdma_rx_root_ns->ns; + return NULL; default: break; } @@ -2450,6 +2454,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) steering->fdb_sub_ns = NULL; cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); + cleanup_root_ns(steering->rdma_rx_root_ns); cleanup_root_ns(steering->egress_root_ns); mlx5_cleanup_fc_stats(dev); kmem_cache_destroy(steering->ftes_cache); @@ -2491,6 +2496,22 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering) return 0; } +static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) +{ + struct fs_prio *prio; + + steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX); + if (!steering->rdma_rx_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1); + if (IS_ERR(prio)) { + cleanup_root_ns(steering->rdma_rx_root_ns); + return PTR_ERR(prio); + } + return 0; +} static int init_fdb_root_ns(struct mlx5_flow_steering *steering) { struct mlx5_flow_namespace *ns; @@ -2727,6 +2748,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } + if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) { + err = init_rdma_rx_root_ns(steering); + if (err) + goto err; + } + if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { err = init_egress_root_ns(steering); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 87de0e4d9124..e43c6f6d46a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -67,6 +67,7 @@ enum fs_flow_table_type { FS_FT_FDB = 0X4, FS_FT_SNIFFER_RX = 0X5, 
FS_FT_SNIFFER_TX = 0X6, + FS_FT_RDMA_RX = 0X7, FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX, }; @@ -90,6 +91,7 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace **esw_ingress_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns; struct mlx5_flow_root_namespace *sniffer_rx_root_ns; + struct mlx5_flow_root_namespace *rdma_rx_root_ns; struct mlx5_flow_root_namespace *egress_root_ns; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index f93a5598b942..28ebb6c93542 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1166,6 +1166,12 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap) +#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap) + +#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap) + #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index fd91df3a4e09..e690ba0f965c 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -73,6 +73,7 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_SNIFFER_RX, MLX5_FLOW_NAMESPACE_SNIFFER_TX, MLX5_FLOW_NAMESPACE_EGRESS, + MLX5_FLOW_NAMESPACE_RDMA_RX, }; enum { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index eeedf3f53ed3..89e7194b3d97 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -598,7 +598,7 @@ struct mlx5_ifc_flow_table_nic_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; - u8 reserved_at_400[0x200]; + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; -- cgit v1.2.3-55-g7522 From f6f7d6b5bd818cc84eebb55ba6bcba38cfd3b385 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Mon, 29 Apr 2019 18:14:14 +0000 Subject: net/mlx5: Add new miss flow table action Flow table supports three types of miss action: 1. Default miss action - go to the default miss table according to the table type. 2. Go to a specific table. 3. Switch domain - go to the root table of an alternative steering table domain. A new table miss action was added - switch_domain. The next domain for the RDMA_RX namespace is the NIC RX domain.
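How the new default miss action reaches the firmware boils down to the following condensed sketch (distilled from the fs_cmd.c hunk in the diff below; not a verbatim excerpt):

        /* When a table has a next table, miss traffic is forwarded to it;
         * otherwise the namespace-wide default miss action is programmed.
         */
        if (next_ft) {
                MLX5_SET(create_flow_table_in, in,
                         flow_table_context.table_miss_action,
                         MLX5_FLOW_TABLE_MISS_ACTION_FWD);
                MLX5_SET(create_flow_table_in, in,
                         flow_table_context.table_miss_id, next_ft->id);
        } else {
                MLX5_SET(create_flow_table_in, in,
                         flow_table_context.table_miss_action,
                         ns->def_miss_action);
        }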
Signed-off-by: Maor Gottlieb Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 13 ++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6 +++++- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1 + include/linux/mlx5/mlx5_ifc.h | 10 +++++++++- 4 files changed, 25 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 629c5ab1c0d1..013b1ca4a791 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -172,9 +172,14 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, case FS_FT_OP_MOD_NORMAL: if (next_ft) { MLX5_SET(create_flow_table_in, in, - flow_table_context.table_miss_action, 1); + flow_table_context.table_miss_action, + MLX5_FLOW_TABLE_MISS_ACTION_FWD); MLX5_SET(create_flow_table_in, in, flow_table_context.table_miss_id, next_ft->id); + } else { + MLX5_SET(create_flow_table_in, in, + flow_table_context.table_miss_action, + ns->def_miss_action); } break; @@ -246,13 +251,15 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); if (next_ft) { MLX5_SET(modify_flow_table_in, in, - flow_table_context.table_miss_action, 1); + flow_table_context.table_miss_action, + MLX5_FLOW_TABLE_MISS_ACTION_FWD); MLX5_SET(modify_flow_table_in, in, flow_table_context.table_miss_id, next_ft->id); } else { MLX5_SET(modify_flow_table_in, in, - flow_table_context.table_miss_action, 0); + flow_table_context.table_miss_action, + ns->def_miss_action); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 3c2302a2b9d4..fb5b61727ee7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2504,6 +2504,9 @@ static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) if (!steering->rdma_rx_root_ns) return -ENOMEM; + steering->rdma_rx_root_ns->def_miss_action = + MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN; + /* Create single prio */ prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1); if (IS_ERR(prio)) { @@ -2748,7 +2751,8 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) { + if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) && + MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) { err = init_rdma_rx_root_ns(steering); if (err) goto err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index e43c6f6d46a7..0c6c5fef4548 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -218,6 +218,7 @@ struct mlx5_flow_root_namespace { struct mutex chain_lock; struct list_head underlay_qpns; const struct mlx5_flow_cmds *cmds; + enum mlx5_flow_table_miss_action def_miss_action; }; int mlx5_init_fc_stats(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 89e7194b3d97..7d9264b282d1 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -370,7 +370,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reformat_l3_tunnel_to_l2[0x1]; u8 reformat_l2_to_l3_tunnel[0x1]; u8 reformat_and_modify_action[0x1]; - u8 reserved_at_15[0xb]; + u8 reserved_at_15[0x2]; + u8 table_miss_action_domain[0x1]; + u8 
reserved_at_18[0x8]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; u8 log_max_modify_header_context[0x8]; @@ -1284,6 +1286,12 @@ enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101, }; +enum mlx5_flow_table_miss_action { + MLX5_FLOW_TABLE_MISS_ACTION_DEF, + MLX5_FLOW_TABLE_MISS_ACTION_FWD, + MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, +}; + struct mlx5_ifc_dest_format_struct_bits { u8 destination_type[0x8]; u8 destination_id[0x18]; -- cgit v1.2.3-55-g7522 From 80f09dfc237f181e92968a72d97b7a4202baa453 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Mon, 29 Apr 2019 18:14:16 +0000 Subject: net/mlx5: Eswitch, enable RoCE loopback traffic When in switchdev mode, we would like to treat loopback RoCE traffic (on eswitch manager) as RDMA and not as regular Ethernet traffic In order to enable it we add flow steering rule that forward RoCE loopback traffic to the HW RoCE filter (by adding allow rule). In addition we add RoCE address in GID index 0, which will be set in the RoCE loopback packet. Signed-off-by: Maor Gottlieb Reviewed-by: Mark Bloch Acked-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4 + drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 182 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/rdma.h | 20 +++ include/linux/mlx5/driver.h | 7 + 5 files changed, 214 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/rdma.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/rdma.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 1a16f6d73cbc..5f0be9b36a04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -35,7 +35,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tu # # Core extra # -mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o +mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o rdma.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 6c8a17ca236e..9a1598bca4d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -37,6 +37,7 @@ #include #include "mlx5_core.h" #include "eswitch.h" +#include "rdma.h" #include "en.h" #include "fs_core.h" #include "lib/devcom.h" @@ -1713,6 +1714,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, esw->host_info.num_vfs = vf_nvports; } + mlx5_rdma_enable_roce(esw->dev); + return 0; err_reps: @@ -1751,6 +1754,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw) num_vfs = esw->dev->priv.sriov.num_vfs; } + mlx5_rdma_disable_roce(esw->dev); esw_offloads_devcom_cleanup(esw); esw_offloads_unload_all_reps(esw, num_vfs); esw_offloads_steering_cleanup(esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c new file mode 100644 index 000000000000..86f77456f873 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies */ + +#include +#include 
+#include + +#include "lib/mlx5.h" +#include "eswitch.h" +#include "fs_core.h" +#include "rdma.h" + +static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev) +{ + struct mlx5_core_roce *roce = &dev->priv.roce; + + if (!roce->ft) + return; + + mlx5_del_flow_rules(roce->allow_rule); + mlx5_destroy_flow_group(roce->fg); + mlx5_destroy_flow_table(roce->ft); +} + +static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_roce *roce = &dev->priv.roce; + struct mlx5_flow_handle *flow_rule = NULL; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_namespace *ns = NULL; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_spec *spec; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + void *match_criteria; + u32 *flow_group_in; + void *misc; + int err; + + if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) && + MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain))) + return -EOPNOTSUPP; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) { + kvfree(flow_group_in); + return -ENOMEM; + } + + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX); + if (!ns) { + mlx5_core_err(dev, "Failed to get RDMA RX namespace"); + err = -EOPNOTSUPP; + goto free; + } + + ft_attr.max_fte = 1; + ft = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(ft)) { + mlx5_core_err(dev, "Failed to create RDMA RX flow table"); + err = PTR_ERR(ft); + goto free; + } + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, + match_criteria); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_port); + + fg = mlx5_create_flow_group(ft, flow_group_in); + if (IS_ERR(fg)) { + err = PTR_ERR(fg); + mlx5_core_err(dev, "Failed to create RDMA RX flow group err(%d)\n", err); + goto destroy_flow_table; + } + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, + dev->priv.eswitch->manager_vport); + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + mlx5_core_err(dev, "Failed to add RoCE allow rule, err=%d\n", + err); + goto destroy_flow_group; + } + + kvfree(spec); + kvfree(flow_group_in); + roce->ft = ft; + roce->fg = fg; + roce->allow_rule = flow_rule; + + return 0; + +destroy_flow_table: + mlx5_destroy_flow_table(ft); +destroy_flow_group: + mlx5_destroy_flow_group(fg); +free: + kvfree(spec); + kvfree(flow_group_in); + return err; +} + +static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev) +{ + mlx5_core_roce_gid_set(dev, 0, 0, 0, + NULL, NULL, false, 0, 0); +} + +static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid) +{ + u8 hw_id[ETH_ALEN]; + + mlx5_query_nic_vport_mac_address(dev, 0, hw_id); + gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); + addrconf_addr_eui48(&gid->raw[8], hw_id); +} + +static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev) +{ + union ib_gid gid; + u8 mac[ETH_ALEN]; + + 
mlx5_rdma_make_default_gid(dev, &gid); + return mlx5_core_roce_gid_set(dev, 0, + MLX5_ROCE_VERSION_1, + 0, gid.raw, mac, + false, 0, 1); +} + +void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) +{ + mlx5_rdma_disable_roce_steering(dev); + mlx5_rdma_del_roce_addr(dev); + mlx5_nic_vport_disable_roce(dev); +} + +void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) +{ + int err; + + err = mlx5_nic_vport_enable_roce(dev); + if (err) { + mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err); + return; + } + + err = mlx5_rdma_add_roce_addr(dev); + if (err) { + mlx5_core_err(dev, "Failed to add RoCE address: %d\n", err); + goto disable_roce; + } + + err = mlx5_rdma_enable_roce_steering(dev); + if (err) { + mlx5_core_err(dev, "Failed to enable RoCE steering: %d\n", err); + goto del_roce_addr; + } + + return; + +del_roce_addr: + mlx5_rdma_del_roce_addr(dev); +disable_roce: + mlx5_nic_vport_disable_roce(dev); + return; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h new file mode 100644 index 000000000000..750cff2a71a4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __MLX5_RDMA_H__ +#define __MLX5_RDMA_H__ + +#include "mlx5_core.h" + +#ifdef CONFIG_MLX5_ESWITCH + +void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); +void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev); + +#else /* CONFIG_MLX5_ESWITCH */ + +static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {} +static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {} + +#endif /* CONFIG_MLX5_ESWITCH */ +#endif /* __MLX5_RDMA_H__ */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 582a9680b182..7fa95270dd59 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -512,6 +512,12 @@ struct mlx5_rl_table { struct mlx5_rl_entry *rl_entry; }; +struct mlx5_core_roce { + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct mlx5_flow_handle *allow_rule; +}; + struct mlx5_priv { struct mlx5_eq_table *eq_table; @@ -565,6 +571,7 @@ struct mlx5_priv { struct mlx5_lag *lag; struct mlx5_devcom *devcom; unsigned long pci_dev_data; + struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; -- cgit v1.2.3-55-g7522 From 75d90e7def8e20b57c1de9e2b672c3bf9249da83 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Mon, 29 Apr 2019 18:14:18 +0000 Subject: net/mlx5: Geneve, Add basic Geneve encap/decap flow table capabilities Introduce support for Geneve flow specification and allow the creation of rules that are matching on basic Geneve protocol fields: VNI, OAM bit, protocol type, options length. 
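To see how a steering consumer would use the new fields, here is a hypothetical sketch (not part of this patch): matching on the Geneve VNI via the standard MLX5_SET helpers, where 'spec' is a caller-allocated flow spec and 'vni' an assumed 24-bit value:

        static void set_geneve_vni_match(struct mlx5_flow_spec *spec, u32 vni)
        {
                void *misc_c = MLX5_ADDR_OF(fte_match_param,
                                            spec->match_criteria,
                                            misc_parameters);
                void *misc_v = MLX5_ADDR_OF(fte_match_param,
                                            spec->match_value,
                                            misc_parameters);

                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
                /* full 24-bit mask for an exact VNI match */
                MLX5_SET(fte_match_set_misc, misc_c, geneve_vni, 0xffffff);
                MLX5_SET(fte_match_set_misc, misc_v, geneve_vni, vni);
        }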
Reviewed-by: Oz Shlomo Signed-off-by: Yevgeny Kliteynik Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 7d9264b282d1..268ac126b3bb 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -307,7 +307,11 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_gre_protocol[0x1]; u8 outer_gre_key[0x1]; u8 outer_vxlan_vni[0x1]; - u8 reserved_at_1a[0x5]; + u8 outer_geneve_vni[0x1]; + u8 outer_geneve_oam[0x1]; + u8 outer_geneve_protocol_type[0x1]; + u8 outer_geneve_opt_len[0x1]; + u8 reserved_at_1e[0x1]; u8 source_eswitch_port[0x1]; u8 inner_dmac[0x1]; @@ -480,7 +484,9 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 vxlan_vni[0x18]; u8 reserved_at_b8[0x8]; - u8 reserved_at_c0[0x20]; + u8 geneve_vni[0x18]; + u8 reserved_at_d8[0x7]; + u8 geneve_oam[0x1]; u8 reserved_at_e0[0xc]; u8 outer_ipv6_flow_label[0x14]; @@ -488,7 +494,11 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_at_100[0xc]; u8 inner_ipv6_flow_label[0x14]; - u8 reserved_at_120[0x28]; + u8 reserved_at_120[0xa]; + u8 geneve_opt_len[0x6]; + u8 geneve_protocol_type[0x10]; + + u8 reserved_at_140[0x8]; u8 bth_dst_qp[0x18]; u8 reserved_at_160[0x20]; u8 outer_esp_spi[0x20]; -- cgit v1.2.3-55-g7522 From b169e64a24442e02cafee1586f17fcb713fe65a6 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Mon, 29 Apr 2019 18:14:20 +0000 Subject: net/mlx5: Geneve, Add flow table capabilities for Geneve decap with TLV options Introduce specification for Geneve decap flow with encapsulation options and allow creation of rules that are matching on Geneve TLV options. Reviewed-by: Oz Shlomo Signed-off-by: Yevgeny Kliteynik Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2 +- include/linux/mlx5/device.h | 4 +- include/linux/mlx5/mlx5_ifc.h | 50 ++++++++++++++++++++--- 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 0c6c5fef4548..a08c3d09a50f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -152,7 +152,7 @@ struct mlx5_ft_underlay_qp { u32 qpn; }; -#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_800 +#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_a00 /* Calculate the fte_match_param length and without the reserved length. * Make sure the reserved field is the last. 
*/ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 28ebb6c93542..4ab801040e98 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1001,7 +1001,8 @@ enum { MLX5_MATCH_OUTER_HEADERS = 1 << 0, MLX5_MATCH_MISC_PARAMETERS = 1 << 1, MLX5_MATCH_INNER_HEADERS = 1 << 2, - + MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3, + MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4, }; enum { @@ -1045,6 +1046,7 @@ enum mlx5_mpls_supported_fields { }; enum mlx5_flex_parser_protos { + MLX5_FLEX_PROTO_GENEVE = 1 << 3, MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4, MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5, }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 268ac126b3bb..6a7fc18a9fe3 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -86,6 +86,11 @@ enum { enum { MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM), + MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11), +}; + +enum { + MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, }; enum { @@ -339,7 +344,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 inner_tcp_flags[0x1]; u8 reserved_at_37[0x9]; - u8 reserved_at_40[0x5]; + u8 geneve_tlv_option_0_data[0x1]; + u8 reserved_at_41[0x4]; u8 outer_first_mpls_over_udp[0x4]; u8 outer_first_mpls_over_gre[0x4]; u8 inner_first_mpls[0x4]; @@ -528,6 +534,12 @@ struct mlx5_ifc_fte_match_set_misc2_bits { u8 reserved_at_1a0[0x60]; }; +struct mlx5_ifc_fte_match_set_misc3_bits { + u8 reserved_at_0[0x120]; + u8 geneve_tlv_option_0_data[0x20]; + u8 reserved_at_140[0xc0]; +}; + struct mlx5_ifc_cmd_pas_bits { u8 pa_h[0x20]; @@ -1247,9 +1259,13 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 num_of_uars_per_page[0x20]; u8 flex_parser_protocols[0x20]; - u8 reserved_at_560[0x20]; - u8 reserved_at_580[0x3c]; + u8 max_geneve_tlv_options[0x8]; + u8 reserved_at_568[0x3]; + u8 max_geneve_tlv_option_data_len[0x5]; + u8 reserved_at_570[0x10]; + + u8 reserved_at_580[0x1c]; u8 mini_cqe_resp_stride_index[0x1]; u8 cqe_128_always[0x1]; u8 cqe_compression_128[0x1]; @@ -1283,7 +1299,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uctx_cap[0x20]; - u8 reserved_at_6c0[0x140]; + u8 reserved_at_6c0[0x4]; + u8 flex_parser_id_geneve_tlv_option_0[0x4]; + u8 reserved_at_6c8[0x138]; }; enum mlx5_flow_destination_type { @@ -1341,7 +1359,9 @@ struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2; - u8 reserved_at_800[0x800]; + struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3; + + u8 reserved_at_a00[0x600]; }; enum { @@ -4850,6 +4870,7 @@ enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, + MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4, }; struct mlx5_ifc_query_flow_group_out_bits { @@ -9545,6 +9566,20 @@ struct mlx5_ifc_sw_icm_bits { u8 sw_icm_start_addr[0x40]; u8 reserved_at_c0[0x140]; +}; + +struct mlx5_ifc_geneve_tlv_option_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x18]; + u8 geneve_option_fte_index[0x8]; + + u8 option_class[0x10]; + u8 option_type[0x8]; + u8 reserved_at_78[0x3]; + u8 option_data_length[0x5]; + + u8 reserved_at_80[0x180]; }; struct mlx5_ifc_create_umem_in_bits { @@ -9589,6 +9624,11 @@ struct mlx5_ifc_create_sw_icm_in_bits { struct mlx5_ifc_sw_icm_bits sw_icm; }; +struct mlx5_ifc_create_geneve_tlv_option_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct 
mlx5_ifc_geneve_tlv_option_bits geneve_tlv_opt; +}; + struct mlx5_ifc_mtrc_string_db_param_bits { u8 string_db_base_address[0x20]; -- cgit v1.2.3-55-g7522 From 91a40a48d52d13fbde3239d5839335cabd9a4eae Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 1 May 2019 03:21:05 +0000 Subject: net/mlx5: Fix broken hca cap offset The cited commit broke the offsets of hca cap struct, fix it. While at it, cleanup a white space introduced by the same commit. Fixes: b169e64a2444 ("net/mlx5: Geneve, Add flow table capabilities for Geneve decap with TLV options") Reported-by: Qian Cai Cc: Yevgeny Kliteynik Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 6a7fc18a9fe3..6b2e6b710ac0 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1265,7 +1265,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_geneve_tlv_option_data_len[0x5]; u8 reserved_at_570[0x10]; - u8 reserved_at_580[0x1c]; + u8 reserved_at_580[0x3c]; u8 mini_cqe_resp_stride_index[0x1]; u8 cqe_128_always[0x1]; u8 cqe_compression_128[0x1]; @@ -9566,7 +9566,7 @@ struct mlx5_ifc_sw_icm_bits { u8 sw_icm_start_addr[0x40]; u8 reserved_at_c0[0x140]; -}; +}; struct mlx5_ifc_geneve_tlv_option_bits { u8 modify_field_select[0x40]; -- cgit v1.2.3-55-g7522 From 7306c274e7297307aceae99a8cb7f2a341b484e1 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 16 Jan 2019 14:31:22 +0200 Subject: net/mlx5e: Take common TIR context settings into a function Many TIR context settings are common to different TIR types, take them into a common function. Signed-off-by: Tariq Toukan Reviewed-by: Aya Levin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 49 ++++++++++------------- 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 236cce59181d..d713ab2e7a2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2674,22 +2674,6 @@ free_in: return err; } -static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, - enum mlx5e_traffic_types tt, - u32 *tirc) -{ - MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); - - mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); - - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); - MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1); - - mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, - &tirc_default_config[tt], tirc, true); -} - static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 mtu) { @@ -3110,32 +3094,41 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); } -static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, - enum mlx5e_traffic_types tt, - u32 *tirc) +static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, + u32 rqtn, u32 *tirc) { MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, rqtn); mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); +} - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); - +static void 
mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, + enum mlx5e_traffic_types tt, + u32 *tirc) +{ + mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &tirc_default_config[tt], tirc, false); } static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) { - MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); - - mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); - - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, rqtn); + mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc); MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); } +static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, + enum mlx5e_traffic_types tt, + u32 *tirc) +{ + mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); + mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, + &tirc_default_config[tt], tirc, true); + MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1); +} + int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) { struct mlx5e_tir *tir; -- cgit v1.2.3-55-g7522 From 69dad68d1bcf26dde3cc4b08b08c2260ae575ab6 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 20 Jan 2019 11:04:34 +0200 Subject: net/mlx5e: Turn on HW tunnel offload in all TIRs Hardware requires that all TIRs that steer traffic to the same RQ should share identical tunneled_offload_en value. For that, the tunneled_offload_en bit should be set/unset (according to the HW capability) for all TIRs', not only the ones dedicated for tunneled (inner) traffic. Fixes: 1b223dd39162 ("net/mlx5e: Fix checksum handling for non-stripped vlan packets") Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5 ++++- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 1 + 4 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7e0c3d4de108..3a183d690e23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -240,6 +240,7 @@ struct mlx5e_params { bool rx_cqe_compress_def; struct net_dim_cq_moder rx_cq_moderation; struct net_dim_cq_moder tx_cq_moderation; + bool tunneled_offload_en; bool lro_en; u8 tx_min_inline_mode; bool vlan_strip_disable; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d713ab2e7a2d..457cc39423f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3100,6 +3100,8 @@ static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, rqtn); + MLX5_SET(tirc, tirc, tunneled_offload_en, + priv->channels.params.tunneled_offload_en); mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); } @@ -3126,7 +3128,6 @@ static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &tirc_default_config[tt], tirc, true); - MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1); } int 
mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) @@ -4572,6 +4573,8 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, /* RSS */ mlx5e_build_rss_params(rss_params, params->num_channels); + params->tunneled_offload_en = + mlx5e_tunnel_inner_ft_supported(mdev); } static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2ebca9bd5cf8..91e24f1cead8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1375,6 +1375,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev) mlx5e_set_rx_cq_mode_params(params, cq_period_mode); params->num_tc = 1; + params->tunneled_offload_en = false; mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 9b03ae1e1e10..ada1b7c0e0b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -68,6 +68,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, params->lro_en = false; params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; + params->tunneled_offload_en = false; } /* Called directly after IPoIB netdevice was created to initialize SW structs */ -- cgit v1.2.3-55-g7522 From 184867373d8c6bc3f8b3e4c8d9c8ef0fb5ce2340 Mon Sep 17 00:00:00 2001 From: Eli Britstein Date: Mon, 4 Mar 2019 05:01:12 +0000 Subject: net/mlx5e: ACLs for priority tag mode Current ConnectX HW is unable to perform VLAN pop in TX path and VLAN push on RX path. As a workaround, untagged packets are tagged with VID 0x000 allowing pop/push actions to be exchanged with VLAN rewrite actions. Use the ingress ACL table, preceding the FDB, to push VLAN 0x000 ID tag for untagged packets and the egress ACL table, succeeding the FDB, to pop VLAN 0x000 ID tag. 
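The ingress side reduces to a single rule; a condensed sketch of it (distilled from the eswitch_offloads.c hunk in the diff below, not a verbatim excerpt):

        /* Match untagged packets (cvlan_tag == 0) and push a priority tag:
         * a VLAN header with VID 0, so pop/rewrite actions can be applied
         * to it later.
         */
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         outer_headers.cvlan_tag);
        MLX5_SET(fte_match_param, spec->match_value,
                 outer_headers.cvlan_tag, 0);
        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
                          MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        flow_act.vlan[0].ethtype = ETH_P_8021Q;
        flow_act.vlan[0].vid = 0;       /* priority tag */
        flow_act.vlan[0].prio = 0;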
Signed-off-by: Eli Britstein Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 24 +-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 12 ++ .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 169 +++++++++++++++++++++ 3 files changed, 193 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 8a67fd197b79..f0ef4ac51b45 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -922,8 +922,8 @@ static void esw_vport_change_handler(struct work_struct *work) mutex_unlock(&esw->state_lock); } -static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_group *vlan_grp = NULL; @@ -1006,8 +1006,8 @@ out: return err; } -static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) mlx5_del_flow_rules(vport->egress.allowed_vlan); @@ -1019,8 +1019,8 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, vport->egress.drop_rule = NULL; } -static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { if (IS_ERR_OR_NULL(vport->egress.acl)) return; @@ -1036,8 +1036,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, vport->egress.acl = NULL; } -static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_core_dev *dev = esw->dev; @@ -1168,8 +1168,8 @@ out: return err; } -static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { if (!IS_ERR_OR_NULL(vport->ingress.drop_rule)) mlx5_del_flow_rules(vport->ingress.drop_rule); @@ -1181,8 +1181,8 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, vport->ingress.allow_rule = NULL; } -static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { if (IS_ERR_OR_NULL(vport->ingress.acl)) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 9ecc8c9eee39..424d45cf9454 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -227,6 +227,18 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, int total_nvports); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); +void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +int 
esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 840630305ede..f371e79cbc9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1601,6 +1601,167 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); } +static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_spec *spec; + int err = 0; + + /* For prio tag mode, there is only 1 FTEs: + * 1) Untagged packets - push prio tag VLAN, allow + * Unmatched traffic is allowed by default + */ + + if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) + return -EOPNOTSUPP; + + esw_vport_cleanup_ingress_rules(esw, vport); + + err = esw_vport_enable_ingress_acl(esw, vport); + if (err) { + mlx5_core_warn(esw->dev, + "failed to enable prio tag ingress acl (%d) on vport[%d]\n", + err, vport->vport); + return err; + } + + esw_debug(esw->dev, + "vport[%d] configure ingress rules\n", vport->vport); + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) { + err = -ENOMEM; + goto out_no_mem; + } + + /* Untagged packets - push prio tag VLAN, allow */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | + MLX5_FLOW_CONTEXT_ACTION_ALLOW; + flow_act.vlan[0].ethtype = ETH_P_8021Q; + flow_act.vlan[0].vid = 0; + flow_act.vlan[0].prio = 0; + vport->ingress.allow_rule = + mlx5_add_flow_rules(vport->ingress.acl, spec, + &flow_act, NULL, 0); + if (IS_ERR(vport->ingress.allow_rule)) { + err = PTR_ERR(vport->ingress.allow_rule); + esw_warn(esw->dev, + "vport[%d] configure ingress untagged allow rule, err(%d)\n", + vport->vport, err); + vport->ingress.allow_rule = NULL; + goto out; + } + +out: + kvfree(spec); +out_no_mem: + if (err) + esw_vport_cleanup_ingress_rules(esw, vport); + return err; +} + +static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_spec *spec; + int err = 0; + + /* For prio tag mode, there is only 1 FTEs: + * 1) prio tag packets - pop the prio tag VLAN, allow + * Unmatched traffic is allowed by default + */ + + esw_vport_cleanup_egress_rules(esw, vport); + + err = esw_vport_enable_egress_acl(esw, vport); + if (err) { + mlx5_core_warn(esw->dev, + "failed to enable egress acl (%d) on vport[%d]\n", + err, vport->vport); + return err; + } + + esw_debug(esw->dev, + "vport[%d] configure prio tag egress rules\n", vport->vport); + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) { + err = -ENOMEM; + goto out_no_mem; + } + + /* prio tag vlan rule - pop it so VF receives untagged packets */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, 
spec->match_value, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0); + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | + MLX5_FLOW_CONTEXT_ACTION_ALLOW; + vport->egress.allowed_vlan = + mlx5_add_flow_rules(vport->egress.acl, spec, + &flow_act, NULL, 0); + if (IS_ERR(vport->egress.allowed_vlan)) { + err = PTR_ERR(vport->egress.allowed_vlan); + esw_warn(esw->dev, + "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n", + vport->vport, err); + vport->egress.allowed_vlan = NULL; + goto out; + } + +out: + kvfree(spec); +out_no_mem: + if (err) + esw_vport_cleanup_egress_rules(esw, vport); + return err; +} + +static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports) +{ + int i, j; + int err; + + mlx5_esw_for_each_vf_vport(esw, i, nvports) { + err = esw_vport_ingress_prio_tag_config(esw, &esw->vports[i]); + if (err) + goto err_ingress; + err = esw_vport_egress_prio_tag_config(esw, &esw->vports[i]); + if (err) + goto err_egress; + } + + return 0; + +err_egress: + esw_vport_disable_ingress_acl(esw, &esw->vports[i]); +err_ingress: + mlx5_esw_for_each_vf_vport_reverse(esw, j, i - 1) { + esw_vport_disable_egress_acl(esw, &esw->vports[j]); + esw_vport_disable_ingress_acl(esw, &esw->vports[j]); + } + + return err; +} + +static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw) +{ + int i; + + mlx5_esw_for_each_vf_vport(esw, i, esw->nvports) { + esw_vport_disable_egress_acl(esw, &esw->vports[i]); + esw_vport_disable_ingress_acl(esw, &esw->vports[i]); + } +} + static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) { int err; @@ -1608,6 +1769,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); + if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) { + err = esw_prio_tag_acls_config(esw, nvports); + if (err) + return err; + } + err = esw_create_offloads_fdb_tables(esw, nvports); if (err) return err; @@ -1636,6 +1803,8 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) esw_destroy_vport_rx_group(esw); esw_destroy_offloads_table(esw); esw_destroy_offloads_fdb_tables(esw); + if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) + esw_prio_tag_acls_cleanup(esw); } static void esw_host_params_event_handler(struct work_struct *work) -- cgit v1.2.3-55-g7522 From 0bac1194539753eca1c0fd9aca7a1764356bdc9f Mon Sep 17 00:00:00 2001 From: Eli Britstein Date: Mon, 4 Mar 2019 13:50:38 +0000 Subject: net/mlx5e: Replace TC VLAN pop with VLAN 0 rewrite in prio tag mode Current ConnectX HW is unable to perform VLAN pop in TX path and VLAN push on RX path. To workaround that limitation untagged packets are tagged with VLAN ID 0x000 (priority tag) and pop/push actions are replaced by VLAN re-write actions (which are supported by the HW). Replace TC VLAN pop action with a VLAN priority tag header rewrite. 
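A condensed sketch of the substitution (distilled from the en_tc.c hunk in the diff below, not a verbatim excerpt):

        if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
            action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
                /* HW cannot pop VLAN on TX; drop the unsupported pop action
                 * and rewrite the priority tag VLAN header (VID 0) instead.
                 */
                action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
                err = add_vlan_prio_tag_rewrite_action(priv, parse_attr,
                                                       hdrs, &action, extack);
                if (err)
                        return err;
        }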
Signed-off-by: Eli Britstein Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 36 +++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 72891cc7f32a..c79db55f8a76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2436,6 +2436,30 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace, return err; } +static int +add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + u32 *action, struct netlink_ext_ack *extack) +{ + const struct flow_action_entry prio_tag_act = { + .vlan.vid = 0, + .vlan.prio = + MLX5_GET(fte_match_set_lyr_2_4, + get_match_headers_value(*action, + &parse_attr->spec), + first_prio) & + MLX5_GET(fte_match_set_lyr_2_4, + get_match_headers_criteria(*action, + &parse_attr->spec), + first_prio), + }; + + return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, + &prio_tag_act, parse_attr, hdrs, action, + extack); +} + static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, struct mlx5e_tc_flow_parse_attr *parse_attr, @@ -2947,6 +2971,18 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, } } + if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && + action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { + /* For prio tag mode, replace vlan pop with rewrite vlan prio + * tag rewrite. + */ + action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs, + &action, extack); + if (err) + return err; + } + if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB, -- cgit v1.2.3-55-g7522 From 0e1c1a2fcfcb6b91db98a60711fa14f9f69a383c Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Mon, 15 Apr 2019 10:10:02 +0300 Subject: net/mlx5e: Return error when trying to insert existing flower filter With unlocked TC it is possible to have spurious deletes and inserts of the same filter. The TC layer needs drivers to always return an error when flow insertion fails in order to correctly calculate "in_hw_count" for each filter. Fix mlx5e_configure_flower() to return -EEXIST when TC tries to insert a filter that is already provisioned to the driver.
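The fix itself is one line in the duplicate-cookie path; sketched here for context (condensed from the driver's lookup path, where 'tc_ht' and 'tc_ht_params' are the flow table state assumed from en_tc.c; not a verbatim excerpt):

        flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
        if (flow) {
                netdev_warn_once(priv->netdev,
                                 "flow cookie %lx already exists, ignoring\n",
                                 f->cookie);
                /* report the duplicate so TC's in_hw_count stays correct */
                err = -EEXIST;
                goto out;
        }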
Signed-off-by: Vlad Buslov Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index c79db55f8a76..122f457091a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3364,6 +3364,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie); + err = -EEXIST; goto out; } -- cgit v1.2.3-55-g7522 From 0e1a2a3e6e7d37cea9f8586f6d7745b539147d9f Mon Sep 17 00:00:00 2001 From: Erez Alfasi Date: Tue, 5 Mar 2019 15:42:23 +0200 Subject: ethtool: Add SFF-8436 and SFF-8636 max EEPROM length definitions Added max EEPROM length defines for ethtool usage: #define ETH_MODULE_SFF_8636_MAX_LEN 640 #define ETH_MODULE_SFF_8436_MAX_LEN 640 These definitions are used to determine the EEPROM data length when reading high eeprom pages. For example, SFF-8636 EEPROM data from page 03h needs to be stored at data[512] - data[639]. Signed-off-by: Erez Alfasi Signed-off-by: Saeed Mahameed --- include/uapi/linux/ethtool.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 818ad368b586..3534ce157ae9 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1712,6 +1712,9 @@ static inline int ethtool_validate_duplex(__u8 duplex) #define ETH_MODULE_SFF_8436 0x4 #define ETH_MODULE_SFF_8436_LEN 256 +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#define ETH_MODULE_SFF_8436_MAX_LEN 640 + /* Reset flags */ /* The reset() operation must clear the flags for the components which * were actually reset. On successful return, the flags indicate the -- cgit v1.2.3-55-g7522 From a708fb7b1f8dcc7a8ed949839958cd5d812dd939 Mon Sep 17 00:00:00 2001 From: Erez Alfasi Date: Thu, 21 Mar 2019 15:02:13 +0200 Subject: net/mlx5e: ethtool, Add support for EEPROM high pages query Add the support to read additional EEPROM information from high pages. 
Information for modules such as SFF-8436 and SFF-8636: 1) Application select table 2) User writable EEPROM 3) Thresholds and alarms Signed-off-by: Erez Alfasi Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 ++--- drivers/net/ethernet/mellanox/mlx5/core/port.c | 40 ++++++++++++++++++---- include/linux/mlx5/port.h | 1 + 3 files changed, 39 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 78dc8fe2a83c..7efaa58ae034 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1561,7 +1561,7 @@ static int mlx5e_get_module_info(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *dev = priv->mdev; int size_read = 0; - u8 data[4]; + u8 data[4] = {0}; size_read = mlx5_query_module_eeprom(dev, 0, 2, data); if (size_read < 2) @@ -1571,17 +1571,17 @@ static int mlx5e_get_module_info(struct net_device *netdev, switch (data[0]) { case MLX5_MODULE_ID_QSFP: modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; break; case MLX5_MODULE_ID_QSFP_PLUS: case MLX5_MODULE_ID_QSFP28: /* data[1] = revision id */ if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) { modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; } else { modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; } break; case MLX5_MODULE_ID_SFP: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 361468e0435d..cc262b30aed5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -293,15 +293,36 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) return 0; } +static int mlx5_eeprom_page(int offset) +{ + if (offset < MLX5_EEPROM_PAGE_LENGTH) + /* Addresses between 0-255 - page 00 */ + return 0; + + /* Addresses between 256 - 639 belongs to pages 01, 02 and 03 + * For example, offset = 400 belongs to page 02: + * 1 + ((400 - 256)/128) = 2 + */ + return 1 + ((offset - MLX5_EEPROM_PAGE_LENGTH) / + MLX5_EEPROM_HIGH_PAGE_LENGTH); +} + +static int mlx5_eeprom_high_page_offset(int page_num) +{ + if (!page_num) /* Page 0 always start from low page */ + return 0; + + /* High page */ + return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH; +} + int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data) { + int module_num, page_num, status, err; u32 out[MLX5_ST_SZ_DW(mcia_reg)]; u32 in[MLX5_ST_SZ_DW(mcia_reg)]; - int module_num; u16 i2c_addr; - int status; - int err; void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); err = mlx5_query_module_num(dev, &module_num); @@ -311,8 +332,15 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, memset(in, 0, sizeof(in)); size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); - if (offset < MLX5_EEPROM_PAGE_LENGTH && - offset + size > MLX5_EEPROM_PAGE_LENGTH) + /* Get the page number related to the given offset */ + page_num = mlx5_eeprom_page(offset); + + /* Set the right offset according to the page number, + * For page_num > 0, relative offset is always >= 128 (high page). 
+ */ + offset -= mlx5_eeprom_high_page_offset(page_num); + + if (offset + size > MLX5_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; @@ -321,7 +349,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, module, module_num); MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr); - MLX5_SET(mcia_reg, in, page_number, 0); + MLX5_SET(mcia_reg, in, page_number, page_num); MLX5_SET(mcia_reg, in, device_address, offset); MLX5_SET(mcia_reg, in, size, size); diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 64e78394fc9c..de9a272c9f3d 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -60,6 +60,7 @@ enum mlx5_an_status { #define MLX5_I2C_ADDR_LOW 0x50 #define MLX5_I2C_ADDR_HIGH 0x51 #define MLX5_EEPROM_PAGE_LENGTH 256 +#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128 enum mlx5e_link_mode { MLX5E_1000BASE_CX_SGMII = 0, -- cgit v1.2.3-55-g7522 From 33e10924a0cead92b0ef9d71165eaeb849ec108f Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Fri, 1 Mar 2019 13:12:55 +0200 Subject: net/mlx5e: Put the common XDP code into a function The same code that returns XDP frames and releases pages is used both in mlx5e_poll_xdpsq_cq and mlx5e_free_xdpsq_descs. Create a function that cleans up an MPWQE. Signed-off-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 63 ++++++++++-------------- 1 file changed, 27 insertions(+), 36 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 399957104f9d..eb8ef78e5626 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -275,12 +275,33 @@ static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info * return true; } +static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, + struct mlx5e_xdp_wqe_info *wi, + struct mlx5e_rq *rq, + bool recycle) +{ + struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; + u16 i; + + for (i = 0; i < wi->num_pkts; i++) { + struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); + + if (rq) { + /* XDP_TX */ + mlx5e_page_release(rq, &xdpi.di, recycle); + } else { + /* XDP_REDIRECT */ + dma_unmap_single(sq->pdev, xdpi.dma_addr, + xdpi.xdpf->len, DMA_TO_DEVICE); + xdp_return_frame(xdpi.xdpf); + } + } +} + bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) { - struct mlx5e_xdp_info_fifo *xdpi_fifo; struct mlx5e_xdpsq *sq; struct mlx5_cqe64 *cqe; - bool is_redirect; u16 sqcc; int i; @@ -293,9 +314,6 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) if (!cqe) return false; - is_redirect = !rq; - xdpi_fifo = &sq->db.xdpi_fifo; - /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), * otherwise a cq overrun may occur */ @@ -317,7 +335,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) do { struct mlx5e_xdp_wqe_info *wi; - u16 ci, j; + u16 ci; last_wqe = (sqcc == wqe_counter); ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); @@ -325,19 +343,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) sqcc += wi->num_wqebbs; - for (j = 0; j < wi->num_pkts; j++) { - struct mlx5e_xdp_info xdpi = - mlx5e_xdpi_fifo_pop(xdpi_fifo); - - if (is_redirect) { - dma_unmap_single(sq->pdev, xdpi.dma_addr, - xdpi.xdpf->len, DMA_TO_DEVICE); - xdp_return_frame(xdpi.xdpf); - } else { - /* Recycle RX 
page */ - mlx5e_page_release(rq, &xdpi.di, true); - } - } + mlx5e_free_xdpsq_desc(sq, wi, rq, true); } while (!last_wqe); } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); @@ -354,31 +360,16 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq) { - struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; - bool is_redirect = !rq; - while (sq->cc != sq->pc) { struct mlx5e_xdp_wqe_info *wi; - u16 ci, i; + u16 ci; ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); wi = &sq->db.wqe_info[ci]; sq->cc += wi->num_wqebbs; - for (i = 0; i < wi->num_pkts; i++) { - struct mlx5e_xdp_info xdpi = - mlx5e_xdpi_fifo_pop(xdpi_fifo); - - if (is_redirect) { - dma_unmap_single(sq->pdev, xdpi.dma_addr, - xdpi.xdpf->len, DMA_TO_DEVICE); - xdp_return_frame(xdpi.xdpf); - } else { - /* Recycle RX page */ - mlx5e_page_release(rq, &xdpi.di, false); - } - } + mlx5e_free_xdpsq_desc(sq, wi, rq, false); } } -- cgit v1.2.3-55-g7522 From 0bdddcea5be6d619a047ad0ef3252a12725e3928 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 19 Apr 2019 14:17:31 +0900 Subject: net/mlx5e: remove meaningless CFLAGS_tracepoint.o CFLAGS_tracepoint.o specifies CFLAGS for compiling tracepoint.c but it does not exist under drivers/net/ethernet/mellanox/mlx5/core/. CFLAGS_tracepoint.o is unused. Signed-off-by: Masahiro Yamada Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 12278f024ffc..243368dc23db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -58,5 +58,3 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o - -CFLAGS_tracepoint.o := -I$(src) -- cgit v1.2.3-55-g7522 From c9bbfb378bc35fcd0b51e4a8950bd50447f39832 Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Fri, 12 Apr 2019 16:14:03 -0500 Subject: net/mlx5: Remove unused mlx5_query_nic_vport_vlans mlx5_query_nic_vport_vlans() is not used anymore. Hence remove it. This patch doesn't change any functionality. 
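The page arithmetic introduced in the EEPROM patch above rewards a quick sanity check. Below is a minimal standalone sketch of the same computation (plain userspace C, not driver code; the constants are local stand-ins for MLX5_EEPROM_PAGE_LENGTH and MLX5_EEPROM_HIGH_PAGE_LENGTH):

#include <stdio.h>

#define EEPROM_PAGE_LENGTH      256 /* low page: flat offsets 0..255 */
#define EEPROM_HIGH_PAGE_LENGTH 128 /* each high page maps 128 bytes */

/* Same computation as mlx5_eeprom_page() in the patch above. */
static int eeprom_page(int offset)
{
	if (offset < EEPROM_PAGE_LENGTH)
		return 0;
	return 1 + (offset - EEPROM_PAGE_LENGTH) / EEPROM_HIGH_PAGE_LENGTH;
}

/* Same computation as mlx5_eeprom_high_page_offset(). */
static int eeprom_high_page_offset(int page_num)
{
	return page_num ? page_num * EEPROM_HIGH_PAGE_LENGTH : 0;
}

int main(void)
{
	/* Flat ethtool offsets and the (page, device_address) they map to. */
	int offsets[] = { 0, 255, 256, 384, 400, 639 };

	for (unsigned int i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		int off = offsets[i];
		int page = eeprom_page(off);
		int dev_addr = off - eeprom_high_page_offset(page);

		printf("flat %3d -> page %d, device_address %3d\n",
		       off, page, dev_addr);
	}
	return 0;
}

Running it reproduces the example from the patch comment: flat offset 400 lands on page 2 at device address 144, offsets 0-255 stay on the low page, and the adjusted address for any high page stays within 128..255.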
Signed-off-by: Bodong Wang
Reviewed-by: Parav Pandit
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/vport.c | 61 -------------------------
 include/linux/mlx5/vport.h                      |  4 --
 2 files changed, 65 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index ef95feca9961..95cdc8cbcba4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -371,67 +371,6 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
 
-int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
-			       u16 vport,
-			       u16 vlans[],
-			       int *size)
-{
-	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
-	void *nic_vport_ctx;
-	int req_list_size;
-	int max_list_size;
-	int out_sz;
-	void *out;
-	int err;
-	int i;
-
-	req_list_size = *size;
-	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
-	if (req_list_size > max_list_size) {
-		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
-			       req_list_size, max_list_size);
-		req_list_size = max_list_size;
-	}
-
-	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
-			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
-
-	memset(in, 0, sizeof(in));
-	out = kzalloc(out_sz, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	MLX5_SET(query_nic_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
-	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
-		 MLX5_NVPRT_LIST_TYPE_VLAN);
-	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-
-	if (vport)
-		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
-	if (err)
-		goto out;
-
-	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
-				     nic_vport_context);
-	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
-				 allowed_list_size);
-
-	*size = req_list_size;
-	for (i = 0; i < req_list_size; i++) {
-		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
-					       nic_vport_ctx,
-					       current_uc_mac_address[i]);
-		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
-	}
-out:
-	kfree(out);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
-
 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
 				u16 vlans[],
 				int list_size)
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 0eef548b9946..3d1c6cdbbba7 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -118,10 +118,6 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
 				  int promisc_uc,
 				  int promisc_mc,
 				  int promisc_all);
-int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
-			       u16 vport,
-			       u16 vlans[],
-			       int *size);
 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
 				u16 vlans[],
 				int list_size);
-- cgit v1.2.3-55-g7522

From 786ef904b43b9ddb675f55ef05afad5f07fb49d0 Mon Sep 17 00:00:00 2001
From: Parav Pandit
Date: Sun, 21 Apr 2019 00:36:21 -0500
Subject: net/mlx5: Reuse mlx5_esw_for_each_vf_vport macro in two files

Currently mlx5_esw_for_each_vf_vport iterates over mlx5_vport entries in
eswitch.c, while the macro of the same name in eswitch_offloads.c iterates
over vport numbers.

Instead of keeping duplicate macro names, to avoid confusion and to reuse
the same macro in both files, move it to eswitch.h.

To iterate over vport numbers where there is no need to iterate over
mlx5_vport, but only a vport number is needed, rename those macros in
eswitch_offloads.c to mlx5_esw_for_each_vf_vport_num*.
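The iterators being consolidated here rely on a comma expression inside the for-loop condition so that the cursor pointer is refreshed before every test, including the first. A compilable sketch of the pattern (hypothetical eswitch/vport types, not the driver's definitions):

#include <stdio.h>

#define FIRST_VF 1

struct vport { int num; };
struct eswitch { struct vport vports[8]; int total; };

/* Same shape as mlx5_esw_for_each_vf_vport(): the comma expression assigns
 * the cursor before each loop test, so (vport) always tracks (i). */
#define for_each_vf_vport(esw, i, vport, nvfs)			\
	for ((i) = FIRST_VF;					\
	     (vport) = &(esw)->vports[(i)],			\
	     (i) <= (nvfs); (i)++)

int main(void)
{
	struct eswitch esw = { .total = 8 };
	struct vport *vport;
	int i, nvfs = 3;

	for (i = 0; i < 8; i++)
		esw.vports[i].num = i;

	for_each_vf_vport(&esw, i, vport, nvfs)
		printf("vf index %d -> vport %d\n", i, vport->num);
	return 0;
}

Note the quirk the kernel macros share: the condition takes the address of entry nvfs + 1 on the final, failing test, so the index must stay within (or one past the end of) the backing array.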
While at it, keep all vport and vport rep iterators together. Signed-off-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 13 ----- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 42 +++++++++++++++++ .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 55 +++++++--------------- 3 files changed, 58 insertions(+), 52 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f0ef4ac51b45..01dc89e9f91d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -72,19 +72,6 @@ static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); MC_ADDR_CHANGE | \ PROMISC_CHANGE) -/* The vport getter/iterator are only valid after esw->total_vports - * and vport->vport are initialized in mlx5_eswitch_init. - */ -#define mlx5_esw_for_all_vports(esw, i, vport) \ - for ((i) = MLX5_VPORT_PF; \ - (vport) = &(esw)->vports[i], \ - (i) < (esw)->total_vports; (i)++) - -#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \ - for ((i) = MLX5_VPORT_FIRST_VF; \ - (vport) = &(esw)->vports[i], \ - (i) <= (nvfs); (i)++) - static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 424d45cf9454..fc512a5d0c4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -446,6 +446,48 @@ static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw, /* TODO: This mlx5e_tc function shouldn't be called by eswitch */ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); +/* The vport getter/iterator are only valid after esw->total_vports + * and vport->vport are initialized in mlx5_eswitch_init. + */ +#define mlx5_esw_for_all_vports(esw, i, vport) \ + for ((i) = MLX5_VPORT_PF; \ + (vport) = &(esw)->vports[i], \ + (i) < (esw)->total_vports; (i)++) + +#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \ + for ((i) = MLX5_VPORT_FIRST_VF; \ + (vport) = &(esw)->vports[(i)], \ + (i) <= (nvfs); (i)++) + +#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \ + for ((i) = (nvfs); \ + (vport) = &(esw)->vports[(i)], \ + (i) >= MLX5_VPORT_FIRST_VF; (i)--) + +/* The rep getter/iterator are only valid after esw->total_vports + * and vport->vport are initialized in mlx5_eswitch_init. 
+ */ +#define mlx5_esw_for_all_reps(esw, i, rep) \ + for ((i) = MLX5_VPORT_PF; \ + (rep) = &(esw)->offloads.vport_reps[i], \ + (i) < (esw)->total_vports; (i)++) + +#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \ + for ((i) = MLX5_VPORT_FIRST_VF; \ + (rep) = &(esw)->offloads.vport_reps[i], \ + (i) <= (nvfs); (i)++) + +#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \ + for ((i) = (nvfs); \ + (rep) = &(esw)->offloads.vport_reps[i], \ + (i) >= MLX5_VPORT_FIRST_VF; (i)--) + +#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \ + for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++) + +#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \ + for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--) + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f371e79cbc9f..e88feaa293f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -54,32 +54,6 @@ #define UPLINK_REP_INDEX 0 -/* The rep getter/iterator are only valid after esw->total_vports - * and vport->vport are initialized in mlx5_eswitch_init. - */ -#define mlx5_esw_for_all_reps(esw, i, rep) \ - for ((i) = MLX5_VPORT_PF; \ - (rep) = &(esw)->offloads.vport_reps[i], \ - (i) < (esw)->total_vports; (i)++) - -#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \ - for ((i) = MLX5_VPORT_FIRST_VF; \ - (rep) = &(esw)->offloads.vport_reps[i], \ - (i) <= (nvfs); (i)++) - -#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \ - for ((i) = (nvfs); \ - (rep) = &(esw)->offloads.vport_reps[i], \ - (i) >= MLX5_VPORT_FIRST_VF; (i)--) - -#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \ - for ((vport) = MLX5_VPORT_FIRST_VF; \ - (vport) <= (nvfs); (vport)++) - -#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \ - for ((vport) = (nvfs); \ - (vport) >= MLX5_VPORT_FIRST_VF; (vport)--) - static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, u16 vport_num) { @@ -659,7 +633,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, flows[mlx5_eswitch_ecpf_idx(esw)] = flow; } - mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) { + mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) { MLX5_SET(fte_match_set_misc, misc, source_port, i); flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); @@ -677,7 +651,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, add_vf_flow_err: nvports = --i; - mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports) + mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports) mlx5_del_flow_rules(flows[i]); if (mlx5_ecpf_vport_exists(esw->dev)) @@ -700,7 +674,8 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw) flows = esw->fdb_table.offloads.peer_miss_rules; - mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev)) + mlx5_esw_for_each_vf_vport_num_reverse(esw, i, + mlx5_core_max_vfs(esw->dev)) mlx5_del_flow_rules(flows[i]); if (mlx5_ecpf_vport_exists(esw->dev)) @@ -1727,14 +1702,15 @@ out_no_mem: static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports) { + struct mlx5_vport *vport = NULL; int i, j; int err; - mlx5_esw_for_each_vf_vport(esw, i, nvports) { - err = esw_vport_ingress_prio_tag_config(esw, 
&esw->vports[i]);
+	mlx5_esw_for_each_vf_vport(esw, i, vport, nvports) {
+		err = esw_vport_ingress_prio_tag_config(esw, vport);
 		if (err)
 			goto err_ingress;
-		err = esw_vport_egress_prio_tag_config(esw, &esw->vports[i]);
+		err = esw_vport_egress_prio_tag_config(esw, vport);
 		if (err)
 			goto err_egress;
 	}
@@ -1742,11 +1718,11 @@ static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
 	return 0;
 
 err_egress:
-	esw_vport_disable_ingress_acl(esw, &esw->vports[i]);
+	esw_vport_disable_ingress_acl(esw, vport);
 err_ingress:
-	mlx5_esw_for_each_vf_vport_reverse(esw, j, i - 1) {
-		esw_vport_disable_egress_acl(esw, &esw->vports[j]);
-		esw_vport_disable_ingress_acl(esw, &esw->vports[j]);
+	mlx5_esw_for_each_vf_vport_reverse(esw, j, vport, i - 1) {
+		esw_vport_disable_egress_acl(esw, vport);
+		esw_vport_disable_ingress_acl(esw, vport);
 	}
 
 	return err;
@@ -1754,11 +1730,12 @@ err_ingress:
 
 static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
 {
+	struct mlx5_vport *vport;
 	int i;
 
-	mlx5_esw_for_each_vf_vport(esw, i, esw->nvports) {
-		esw_vport_disable_egress_acl(esw, &esw->vports[i]);
-		esw_vport_disable_ingress_acl(esw, &esw->vports[i]);
+	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->nvports) {
+		esw_vport_disable_egress_acl(esw, vport);
+		esw_vport_disable_ingress_acl(esw, vport);
 	}
 }
-- cgit v1.2.3-55-g7522

From ee813f314b2486549cd06e9e2c4d56ccab005609 Mon Sep 17 00:00:00 2001
From: Parav Pandit
Date: Sun, 21 Apr 2019 20:34:45 -0500
Subject: net/mlx5: Use available mlx5_vport struct

Several functions need to access mlx5_vport and vport_num. When these
functions are called, the caller already has the mlx5_vport pointer
available, so pass that pointer instead.

This is a preparation patch for adding error checks to
mlx5_eswitch_get_vport() and returning an error status; by doing so, we
reduce the number of places where the error check of
mlx5_eswitch_get_vport() would otherwise be needed.

While making this change, mlx5_eswitch_query_vport_drop_stats() is
corrected to work on a vport instead of a vport_idx.
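The calling-convention change is mechanical but worth seeing in isolation: instead of every helper re-deriving the structure from a vport number, the caller resolves it once and hands the pointer down. A small sketch under hypothetical types (not the driver's):

#include <stdio.h>

struct vport { int num; int enabled; };

static struct vport *lookup(struct vport *table, int vport_num)
{
	return &table[vport_num];
}

/* Before: every helper repeats the number-to-struct lookup. */
static void disable_by_num(struct vport *table, int vport_num)
{
	struct vport *vport = lookup(table, vport_num); /* repeated lookup */

	vport->enabled = 0;
}

/* After: the caller resolves the struct once and passes the pointer;
 * the number is still available as vport->num when needed. */
static void disable(struct vport *vport)
{
	vport->enabled = 0;
	printf("vport %d disabled\n", vport->num);
}

int main(void)
{
	struct vport table[2] = { { 0, 1 }, { 1, 1 } };
	struct vport *vport = lookup(table, 1);

	disable_by_num(table, 1); /* old style: lookup inside the helper */
	disable(vport);           /* new style: one lookup at the call site */
	return 0;
}

No information is lost by the change: the number is recoverable from the struct (vport->num here, vport->vport in the driver), which is what lets the next patches centralize validation in the getter.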
Signed-off-by: Parav Pandit Reviewed-by: Bodong Wang Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 109 ++++++++++------------ 1 file changed, 51 insertions(+), 58 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 01dc89e9f91d..41afc7bf8bd8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -631,9 +631,8 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) /* Apply vport UC/MC list to HW l2 table and FDB table */ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, - u16 vport_num, int list_type) + struct mlx5_vport *vport, int list_type) { - struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; vport_addr_action vport_addr_add; vport_addr_action vport_addr_del; @@ -666,9 +665,8 @@ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, /* Sync vport UC/MC list from vport context */ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, - u16 vport_num, int list_type) + struct mlx5_vport *vport, int list_type) { - struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; u8 (*mac_list)[ETH_ALEN]; struct l2addr_node *node; @@ -697,12 +695,12 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, if (!vport->enabled) goto out; - err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type, + err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type, mac_list, &size); if (err) goto out; esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n", - vport_num, is_uc ? "UC" : "MC", size); + vport->vport, is_uc ? 
"UC" : "MC", size); for (i = 0; i < size; i++) { if (is_uc && !is_valid_ether_addr(mac_list[i])) @@ -740,10 +738,10 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, if (!addr) { esw_warn(esw->dev, "Failed to add MAC(%pM) to vport[%d] DB\n", - mac_list[i], vport_num); + mac_list[i], vport->vport); continue; } - addr->vport = vport_num; + addr->vport = vport->vport; addr->action = MLX5_ACTION_ADD; } out: @@ -753,9 +751,9 @@ out: /* Sync vport UC/MC list from vport context * Must be called after esw_update_vport_addr_list */ -static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num) +static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); struct l2addr_node *node; struct vport_addr *addr; struct hlist_head *hash; @@ -778,20 +776,20 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num) if (!addr) { esw_warn(esw->dev, "Failed to add allmulti MAC(%pM) to vport[%d] DB\n", - mac, vport_num); + mac, vport->vport); continue; } - addr->vport = vport_num; + addr->vport = vport->vport; addr->action = MLX5_ACTION_ADD; addr->mc_promisc = true; } } /* Apply vport rx mode to HW FDB table */ -static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num, +static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, bool promisc, bool mc_promisc) { - struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); struct esw_mc_addr *allmulti_addr = &esw->mc_promisc; if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) @@ -799,7 +797,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num, if (mc_promisc) { vport->allmulti_rule = - esw_fdb_set_vport_allmulti_rule(esw, vport_num); + esw_fdb_set_vport_allmulti_rule(esw, vport->vport); if (!allmulti_addr->uplink_rule) allmulti_addr->uplink_rule = esw_fdb_set_vport_allmulti_rule(esw, @@ -822,8 +820,8 @@ promisc: return; if (promisc) { - vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw, - vport_num); + vport->promisc_rule = + esw_fdb_set_vport_promisc_rule(esw, vport->vport); } else if (vport->promisc_rule) { mlx5_del_flow_rules(vport->promisc_rule); vport->promisc_rule = NULL; @@ -831,23 +829,23 @@ promisc: } /* Sync vport rx mode from vport context */ -static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num) +static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); int promisc_all = 0; int promisc_uc = 0; int promisc_mc = 0; int err; err = mlx5_query_nic_vport_promisc(esw->dev, - vport_num, + vport->vport, &promisc_uc, &promisc_mc, &promisc_all); if (err) return; esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n", - vport_num, promisc_all, promisc_mc); + vport->vport, promisc_all, promisc_mc); if (!vport->info.trusted || !vport->enabled) { promisc_uc = 0; @@ -855,7 +853,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num) promisc_all = 0; } - esw_apply_vport_rx_mode(esw, vport_num, promisc_all, + esw_apply_vport_rx_mode(esw, vport, promisc_all, (promisc_all || promisc_mc)); } @@ -870,27 +868,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport) vport->vport, mac); if (vport->enabled_events & UC_ADDR_CHANGE) { - esw_update_vport_addr_list(esw, vport->vport, - MLX5_NVPRT_LIST_TYPE_UC); - 
esw_apply_vport_addr_list(esw, vport->vport, - MLX5_NVPRT_LIST_TYPE_UC); + esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); + esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); } - if (vport->enabled_events & MC_ADDR_CHANGE) { - esw_update_vport_addr_list(esw, vport->vport, - MLX5_NVPRT_LIST_TYPE_MC); - } + if (vport->enabled_events & MC_ADDR_CHANGE) + esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); if (vport->enabled_events & PROMISC_CHANGE) { - esw_update_vport_rx_mode(esw, vport->vport); + esw_update_vport_rx_mode(esw, vport); if (!IS_ERR_OR_NULL(vport->allmulti_rule)) - esw_update_vport_mc_promisc(esw, vport->vport); + esw_update_vport_mc_promisc(esw, vport); } - if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) { - esw_apply_vport_addr_list(esw, vport->vport, - MLX5_NVPRT_LIST_TYPE_MC); - } + if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) + esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport); if (vport->enabled) @@ -1407,10 +1399,10 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw) esw->qos.enabled = false; } -static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, +static int esw_vport_enable_qos(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, u32 initial_max_rate, u32 initial_bw_share) { - struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_core_dev *dev = esw->dev; void *vport_elem; @@ -1427,7 +1419,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); - MLX5_SET(vport_element, vport_elem, vport_number, vport_num); + MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); MLX5_SET(scheduling_context, sched_ctx, parent_element_id, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, sched_ctx, max_average_bw, @@ -1440,7 +1432,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, &vport->qos.esw_tsar_ix); if (err) { esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", - vport_num, err); + vport->vport, err); return err; } @@ -1448,10 +1440,10 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, return 0; } -static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) +static void esw_vport_disable_qos(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); - int err = 0; + int err; if (!vport->qos.enabled) return; @@ -1461,15 +1453,15 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) vport->qos.esw_tsar_ix); if (err) esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n", - vport_num, err); + vport->vport, err); vport->qos.enabled = false; } -static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, +static int esw_vport_qos_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, u32 max_rate, u32 bw_share) { - struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_core_dev *dev = esw->dev; void *vport_elem; @@ -1486,7 +1478,7 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); vport_elem = 
MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); - MLX5_SET(vport_element, vport_elem, vport_number, vport_num); + MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); MLX5_SET(scheduling_context, sched_ctx, parent_element_id, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, sched_ctx, max_average_bw, @@ -1502,7 +1494,7 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, bitmask); if (err) { esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n", - vport_num, err); + vport->vport, err); return err; } @@ -1605,7 +1597,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, esw_apply_vport_conf(esw, vport); /* Attach vport to the eswitch rate limiter */ - if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate, + if (esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share)) esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num); @@ -1650,7 +1642,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, */ esw_vport_change_handle_locked(vport); vport->enabled_events = 0; - esw_vport_disable_qos(esw, vport_num); + esw_vport_disable_qos(esw, vport); if (esw->manager_vport != vport_num && esw->mode == SRIOV_LEGACY) { mlx5_modify_vport_admin_state(esw->dev, @@ -2271,7 +2263,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) if (bw_share == evport->qos.bw_share) continue; - err = esw_vport_qos_config(esw, evport->vport, vport_max_rate, + err = esw_vport_qos_config(esw, evport, vport_max_rate, bw_share); if (!err) evport->qos.bw_share = bw_share; @@ -2325,7 +2317,7 @@ set_max_rate: if (max_rate == evport->info.max_rate) goto unlock; - err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share); + err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share); if (!err) evport->info.max_rate = max_rate; @@ -2335,11 +2327,10 @@ unlock: } static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, - int vport_idx, + struct mlx5_vport *vport, struct mlx5_vport_drop_stats *stats) { struct mlx5_eswitch *esw = dev->priv.eswitch; - struct mlx5_vport *vport = &esw->vports[vport_idx]; u64 rx_discard_vport_down, tx_discard_vport_down; u64 bytes = 0; int err = 0; @@ -2359,7 +2350,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) return 0; - err = mlx5_query_vport_down_stats(dev, vport_idx, 1, + err = mlx5_query_vport_down_stats(dev, vport->vport, 1, &rx_discard_vport_down, &tx_discard_vport_down); if (err) @@ -2374,9 +2365,10 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, } int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, - int vport, + int vport_num, struct ifla_vf_stats *vf_stats) { + struct mlx5_vport *vport; int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; struct mlx5_vport_drop_stats stats = {0}; @@ -2385,9 +2377,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, if (!ESW_ALLOWED(esw)) return -EPERM; - if (!LEGAL_VPORT(esw, vport)) + if (!LEGAL_VPORT(esw, vport_num)) return -EINVAL; + vport = mlx5_eswitch_get_vport(esw, vport_num); out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -2395,7 +2388,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, 
op_mod, 0); - MLX5_SET(query_vport_counter_in, in, vport_number, vport); + MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport); MLX5_SET(query_vport_counter_in, in, other_vport, 1); memset(out, 0, outlen); -- cgit v1.2.3-55-g7522 From 4314ebaa1e423d398035cdb7d15c50defa0f48af Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Mon, 15 Apr 2019 17:36:43 -0500 Subject: net/mlx5: E-Switch, Use getter to access all vport array Some functions issue vport commands and access vport array using vport_index/vport_num interchangeably which is OK for VFs vports. However, this creates potential bug if those vports are not VFs (E.g, uplink, sf) where their vport_index don't equal to vport_num. Prepare code to access mlx5_vport structure using a getter function. Signed-off-by: Bodong Wang Signed-off-by: Vu Pham Reviewed-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 18 +++++++++--------- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2 ++ 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 41afc7bf8bd8..0107ce8bf659 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -72,8 +72,8 @@ static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); MC_ADDR_CHANGE | \ PROMISC_CHANGE) -static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, - u16 vport_num) +struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, + u16 vport_num) { u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); @@ -1916,7 +1916,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, return -EINVAL; mutex_lock(&esw->state_lock); - evport = &esw->vports[vport]; + evport = mlx5_eswitch_get_vport(esw, vport); if (evport->info.spoofchk && !is_valid_ether_addr(mac)) mlx5_core_warn(esw->dev, @@ -1960,7 +1960,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, return -EINVAL; mutex_lock(&esw->state_lock); - evport = &esw->vports[vport]; + evport = mlx5_eswitch_get_vport(esw, vport); err = mlx5_modify_vport_admin_state(esw->dev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, @@ -1989,7 +1989,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, if (!LEGAL_VPORT(esw, vport)) return -EINVAL; - evport = &esw->vports[vport]; + evport = mlx5_eswitch_get_vport(esw, vport); memset(ivi, 0, sizeof(*ivi)); ivi->vf = vport - 1; @@ -2020,7 +2020,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, return -EINVAL; mutex_lock(&esw->state_lock); - evport = &esw->vports[vport]; + evport = mlx5_eswitch_get_vport(esw, vport); err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); if (err) @@ -2064,7 +2064,7 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, return -EINVAL; mutex_lock(&esw->state_lock); - evport = &esw->vports[vport]; + evport = mlx5_eswitch_get_vport(esw, vport); pschk = evport->info.spoofchk; evport->info.spoofchk = spoofchk; if (pschk && !is_valid_ether_addr(evport->info.mac)) @@ -2213,7 +2213,7 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, return -EINVAL; mutex_lock(&esw->state_lock); - evport = &esw->vports[vport]; + evport = mlx5_eswitch_get_vport(esw, vport); evport->info.trusted = setting; if (evport->enabled) esw_vport_change_handle_locked(evport); @@ -2299,7 +2299,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, return -EOPNOTSUPP; 
mutex_lock(&esw->state_lock); - evport = &esw->vports[vport]; + evport = mlx5_eswitch_get_vport(esw, vport); if (min_rate == evport->info.min_rate) goto set_max_rate; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index fc512a5d0c4c..2e6b37d4fc7f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -488,6 +488,8 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); #define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \ for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--) +struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, + u16 vport_num); #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } -- cgit v1.2.3-55-g7522 From 5d9986a3947a08185c407442c9a5fc9546b9e440 Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Mon, 15 Apr 2019 17:39:07 -0500 Subject: net/mlx5: E-Switch, Fix the check of legal vport The check of legal vport is to ensure the vport number falls between 0 and total number of vports. Along with the introduction of uplink rep, enabled vports are not consecutive any more. Therefore, rely on the eswitch vport getter function to check if it's a valid vport. As the getter function relies on eswitch, add the check of vport group manager and validation the presence of eswitch structure. Remove the redundant check in the function calls. Since the vport array will be allocated once eswitch is initialized and will be kept alive if eswitch presents, no need to protect it with the state lock. Fixes: 5ae5162066d8 ("net/mlx5: E-Switch, Assign a different position for uplink rep and vport") Signed-off-by: Bodong Wang Signed-off-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 86 +++++++++++------------ drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 5 +- 2 files changed, 46 insertions(+), 45 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 0107ce8bf659..9ea0ccfe5ef5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -72,12 +72,22 @@ static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); MC_ADDR_CHANGE | \ PROMISC_CHANGE) -struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, - u16 vport_num) +struct mlx5_vport *__must_check +mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) { - u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); + u16 idx; + + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) + return ERR_PTR(-EPERM); + + idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); + + if (idx > esw->total_vports - 1) { + esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n", + vport_num, idx); + return ERR_PTR(-EINVAL); + } - WARN_ON(idx > esw->total_vports - 1); return &esw->vports[idx]; } @@ -1667,6 +1677,9 @@ static int eswitch_vport_event(struct notifier_block *nb, vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return NOTIFY_OK; + if (vport->enabled) queue_work(esw->work_queue, &vport->vport_change_handler); @@ -1901,22 +1914,19 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) } /* Vport Administration */ -#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < 
esw->total_vports) - int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, int vport, u8 mac[ETH_ALEN]) { - struct mlx5_vport *evport; + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); u64 node_guid; int err = 0; - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) - return -EPERM; - if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) + if (IS_ERR(evport)) + return PTR_ERR(evport); + if (is_multicast_ether_addr(mac)) return -EINVAL; mutex_lock(&esw->state_lock); - evport = mlx5_eswitch_get_vport(esw, vport); if (evport->info.spoofchk && !is_valid_ether_addr(mac)) mlx5_core_warn(esw->dev, @@ -1951,16 +1961,15 @@ unlock: int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, int vport, int link_state) { - struct mlx5_vport *evport; + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; - if (!LEGAL_VPORT(esw, vport)) - return -EINVAL; + if (IS_ERR(evport)) + return PTR_ERR(evport); mutex_lock(&esw->state_lock); - evport = mlx5_eswitch_get_vport(esw, vport); err = mlx5_modify_vport_admin_state(esw->dev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, @@ -1982,14 +1991,10 @@ unlock: int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi) { - struct mlx5_vport *evport; + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) - return -EPERM; - if (!LEGAL_VPORT(esw, vport)) - return -EINVAL; - - evport = mlx5_eswitch_get_vport(esw, vport); + if (IS_ERR(evport)) + return PTR_ERR(evport); memset(ivi, 0, sizeof(*ivi)); ivi->vf = vport - 1; @@ -2011,16 +2016,17 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos, u8 set_flags) { - struct mlx5_vport *evport; + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; - if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7)) + if (IS_ERR(evport)) + return PTR_ERR(evport); + if (vlan > 4095 || qos > 7) return -EINVAL; mutex_lock(&esw->state_lock); - evport = mlx5_eswitch_get_vport(esw, vport); err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); if (err) @@ -2054,17 +2060,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, int vport, bool spoofchk) { - struct mlx5_vport *evport; + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); bool pschk; int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; - if (!LEGAL_VPORT(esw, vport)) - return -EINVAL; + if (IS_ERR(evport)) + return PTR_ERR(evport); mutex_lock(&esw->state_lock); - evport = mlx5_eswitch_get_vport(esw, vport); pschk = evport->info.spoofchk; evport->info.spoofchk = spoofchk; if (pschk && !is_valid_ether_addr(evport->info.mac)) @@ -2205,15 +2210,14 @@ out: int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, int vport, bool setting) { - struct mlx5_vport *evport; + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); if (!ESW_ALLOWED(esw)) return -EPERM; - if (!LEGAL_VPORT(esw, vport)) - return -EINVAL; + if (IS_ERR(evport)) + return PTR_ERR(evport); mutex_lock(&esw->state_lock); - evport = mlx5_eswitch_get_vport(esw, vport); evport->info.trusted = setting; if (evport->enabled) esw_vport_change_handle_locked(evport); @@ -2277,7 +2281,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) int 
mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
 			    u32 max_rate, u32 min_rate)
 {
-	struct mlx5_vport *evport;
+	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
 	u32 fw_max_bw_share;
 	u32 previous_min_rate;
 	u32 divider;
@@ -2287,8 +2291,8 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
+	if (IS_ERR(evport))
+		return PTR_ERR(evport);
 
 	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
 	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
@@ -2299,7 +2303,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
 		return -EOPNOTSUPP;
 
 	mutex_lock(&esw->state_lock);
-	evport = mlx5_eswitch_get_vport(esw, vport);
 
 	if (min_rate == evport->info.min_rate)
 		goto set_max_rate;
@@ -2368,19 +2371,16 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 int vport_num,
 				 struct ifla_vf_stats *vf_stats)
 {
-	struct mlx5_vport *vport;
+	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
 	struct mlx5_vport_drop_stats stats = {0};
 	int err = 0;
 	u32 *out;
 
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport_num))
-		return -EINVAL;
+	if (IS_ERR(vport))
+		return PTR_ERR(vport);
 
-	vport = mlx5_eswitch_get_vport(esw, vport_num);
 	out = kvzalloc(outlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2e6b37d4fc7f..ed3fad689ec9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -488,8 +488,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
 #define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)	\
 	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
 
-struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw,
-					  u16 vport_num);
+struct mlx5_vport *__must_check
+mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
-- cgit v1.2.3-55-g7522
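The patch above replaces the LEGAL_VPORT() range check with a getter that encodes errors in the returned pointer. The kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers make that possible by reserving the top 4095 addresses of pointer space for errno values; here is a self-contained userspace approximation (local re-implementations, not the kernel headers):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR():
 * small negative errno values are cast into the top of pointer space. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct vport { int num; };

static struct vport vports[4] = { {0}, {1}, {2}, {3} };

/* Same contract as the patched mlx5_eswitch_get_vport(): the caller gets
 * either a valid pointer or an encoded errno, never an unchecked slot. */
static struct vport *get_vport(int num)
{
	if (num < 0 || num >= 4)
		return ERR_PTR(-EINVAL);
	return &vports[num];
}

int main(void)
{
	struct vport *v = get_vport(7);

	if (IS_ERR(v))
		printf("lookup failed: %ld\n", PTR_ERR(v));

	v = get_vport(2);
	if (!IS_ERR(v))
		printf("got vport %d\n", v->num);
	return 0;
}

Because the getter can now fail, the __must_check annotation added in eswitch.h turns any ignored return value into a compiler warning.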
From 6f4e02193c9a9ea54dd3151cf97489fa787cd0e6 Mon Sep 17 00:00:00 2001
From: Bodong Wang
Date: Thu, 18 Apr 2019 18:24:15 -0500
Subject: net/mlx5: E-Switch, Use atomic rep state to serialize state change

When the state of rep was introduced, it was also designed to prevent
duplicate unloading of the same rep. Consider the following two flows when
an eswitch manager is in switchdev mode with n VF reps loaded.

+--------------------------------------+--------------------------------+
| cpu-0                                | cpu-1                          |
| --------                             | --------                       |
| mlx5_ib_remove                       | mlx5_eswitch_disable_sriov     |
|  mlx5_ib_unregister_vport_reps       |  esw_offloads_cleanup          |
|   mlx5_eswitch_unregister_vport_reps |   esw_offloads_unload_all_reps |
|    __unload_reps_all_vport           |    __unload_reps_all_vport     |
+--------------------------------------+--------------------------------+

These two flows will try to unload the same rep. Per the original design,
once one flow unloads the rep, the state moves to REGISTERED, and the 2nd
flow no longer needs to do the unload and bails out. However, as reads and
writes of the state are not atomic, while the 1st flow is doing the unload
the state is still LOADED, so the 2nd flow is able to do the same unload
action. A kernel crash will happen.

To solve this, the driver should do an atomic test-and-set on the state, so
that only one flow can change the rep state from LOADED to REGISTERED and
proceed to do the actual unloading. Since the state is changed to an atomic
type, all other reads and writes of it should be atomic as well.

Fixes: f121e0ea9586 ("net/mlx5: E-Switch, Add state to eswitch vport representors")
Signed-off-by: Bodong Wang
Reviewed-by: Parav Pandit
Reviewed-by: Vu Pham
Signed-off-by: Saeed Mahameed
---
 .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 36 ++++++++++------------
 include/linux/mlx5/eswitch.h                       |  2 +-
 2 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index e88feaa293f6..e09ae27485ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -333,7 +333,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 	esw_debug(esw->dev, "%s applying global %s policy\n",
 		  __func__, val ? "pop" : "none");
 	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
 		rep = &esw->offloads.vport_reps[vf_vport];
-		if (rep->rep_if[REP_ETH].state != REP_LOADED)
+		if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
 			continue;
 
 		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -1277,7 +1277,8 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
 		ether_addr_copy(rep->hw_id, hw_id);
 
 		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
-			rep->rep_if[rep_type].state = REP_UNREGISTERED;
+			atomic_set(&rep->rep_if[rep_type].state,
+				   REP_UNREGISTERED);
 	}
 
 	return 0;
@@ -1286,11 +1287,9 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
 				      struct mlx5_eswitch_rep *rep, u8 rep_type)
 {
-	if (rep->rep_if[rep_type].state != REP_LOADED)
-		return;
-
-	rep->rep_if[rep_type].unload(rep);
-	rep->rep_if[rep_type].state = REP_REGISTERED;
+	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
+		rep->rep_if[rep_type].unload(rep);
 }
 
 static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -1351,16 +1350,15 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
 {
 	int err = 0;
 
-	if (rep->rep_if[rep_type].state != REP_REGISTERED)
-		return 0;
-
-	err = rep->rep_if[rep_type].load(esw->dev, rep);
-	if (err)
-		return err;
-
-	rep->rep_if[rep_type].state = REP_LOADED;
+	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
+		err = rep->rep_if[rep_type].load(esw->dev, rep);
+		if (err)
+			atomic_set(&rep->rep_if[rep_type].state,
+				   REP_REGISTERED);
+	}
 
-	return 0;
+	return err;
 }
 
 static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -2217,7 +2215,7 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
 		rep_if->get_proto_dev = __rep_if->get_proto_dev;
 		rep_if->priv = __rep_if->priv;
 
-		rep_if->state = REP_REGISTERED;
+		atomic_set(&rep_if->state, REP_REGISTERED);
 	}
 }
 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
@@ -2232,7 +2230,7 @@
 	__unload_reps_all_vport(esw, max_vf, rep_type);
 
 	mlx5_esw_for_all_reps(esw, i, rep)
-		rep->rep_if[rep_type].state = REP_UNREGISTERED;
+		atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
 }
 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
 
@@ -2252,7 +2250,7 @@ void
*mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, rep = mlx5_eswitch_get_rep(esw, vport); - if (rep->rep_if[rep_type].state == REP_LOADED && + if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED && rep->rep_if[rep_type].get_proto_dev) return rep->rep_if[rep_type].get_proto_dev(rep); return NULL; diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 96d8435421de..0ca77dd1429c 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -35,7 +35,7 @@ struct mlx5_eswitch_rep_if { void (*unload)(struct mlx5_eswitch_rep *rep); void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); void *priv; - u8 state; + atomic_t state; }; struct mlx5_eswitch_rep { -- cgit v1.2.3-55-g7522
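The core of the fix above is that the LOADED -> REGISTERED transition and the decision to call unload() become a single atomic step. A minimal userspace analogue using C11 atomics (compile with -pthread; this mirrors the shape of the patched __esw_offloads_unload_rep(), not its exact code):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

enum { REP_UNREGISTERED, REP_REGISTERED, REP_LOADED };

static atomic_int state = REP_LOADED;
static atomic_int unload_calls;

/* Only the thread whose compare-and-exchange wins the LOADED -> REGISTERED
 * transition performs the unload; the loser sees the updated state. */
static void *unload(void *arg)
{
	int expected = REP_LOADED;

	(void)arg;
	if (atomic_compare_exchange_strong(&state, &expected, REP_REGISTERED))
		atomic_fetch_add(&unload_calls, 1); /* stands in for the unload hook */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, unload, NULL);
	pthread_create(&b, NULL, unload, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Always exactly one unload, regardless of interleaving. */
	printf("unload ran %d time(s)\n", atomic_load(&unload_calls));
	return 0;
}

However the two threads interleave, only the one that observes REP_LOADED performs the unload; the other sees REP_REGISTERED and bails out, which is exactly the serialization the commit message describes.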