path: root/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
author     Bodong Wang       2019-01-29 05:12:45 +0100
committer  Saeed Mahameed    2019-02-16 02:25:57 +0100
commit     879c8f84e3602961e441723aaaac372a44dfe588 (patch)
tree       4246864b89dc7d8c696789fbf923f81e694612d8  /drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
parent     net/mlx5: E-Switch, Split VF and special vports for offloads mode (diff)
net/mlx5: E-Switch, Use getter and iterator to access vport/rep
With only PF and VF, it is sufficient to use the vport/rep array index as the vport number, because PF and VF vport numbers are consecutive. With the ECPF and UPLINK vports introduced in downstream patches, the numbers are no longer consecutive. Use a getter to access a specific vport/rep, and an iterator to traverse a list of vports/reps. This hides the translation between array index and vport number, and leaves room for a different translation mechanism in the future. This patch doesn't change any functionality.

Signed-off-by: Bodong Wang <bodong@mellanox.com>
Suggested-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
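As a rough, out-of-tree illustration of the pattern this patch introduces (not the driver code itself; the names below such as toy_esw, toy_get_rep and TOY_VPORT_UPLINK are made up for the example), a getter plus an iterator keep the vport-number-to-array-index translation in one place:

/*
 * Toy sketch of the getter/iterator pattern (illustrative names only,
 * not the mlx5 driver code).  Build with: cc -o toy toy.c
 */
#include <stdio.h>

#define TOY_VPORT_PF        0
#define TOY_VPORT_FIRST_VF  1
#define TOY_VPORT_UPLINK    0xffff  /* deliberately non-consecutive vport number */
#define TOY_UPLINK_REP_IDX  0       /* where the uplink rep currently lives */
#define TOY_TOTAL_VPORTS    4       /* PF + 3 VFs in this toy setup */

struct toy_rep { int vport; };
struct toy_esw { struct toy_rep reps[TOY_TOTAL_VPORTS]; };

/* Getter: the only place that knows how a vport number maps to an index. */
static struct toy_rep *toy_get_rep(struct toy_esw *esw, int vport_num)
{
	int idx = (vport_num == TOY_VPORT_UPLINK) ? TOY_UPLINK_REP_IDX
						  : vport_num;
	return &esw->reps[idx];
}

/* Iterator: callers walk VF reps without touching array indices directly. */
#define toy_for_each_vf_rep(esw, i, rep, nvfs)			\
	for ((i) = TOY_VPORT_FIRST_VF;				\
	     (rep) = &(esw)->reps[i], (i) <= (nvfs);		\
	     (i)++)

int main(void)
{
	struct toy_esw esw;
	struct toy_rep *rep;
	int i;

	for (i = 0; i < TOY_TOTAL_VPORTS; i++)
		esw.reps[i].vport = i;

	toy_for_each_vf_rep(&esw, i, rep, 3)
		printf("VF rep for vport %d\n", rep->vport);

	printf("uplink rep index: %ld\n",
	       (long)(toy_get_rep(&esw, TOY_VPORT_UPLINK) - esw.reps));
	return 0;
}

As in the real macros added in the diff below, the comma operator in the iterator's condition refreshes the rep pointer on every pass before the bound check, so the loop body only ever sees a rep that matches the current vport.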
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  75
1 file changed, 52 insertions, 23 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 14f7ad67cfe4..4979c7ee0ad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -56,6 +56,44 @@ enum {
#define UPLINK_REP_INDEX 0
+/* The rep getter/iterator are only valid after esw->total_vports
+ * and vport->vport are initialized in mlx5_eswitch_init.
+ */
+#define mlx5_esw_for_all_reps(esw, i, rep) \
+ for ((i) = MLX5_VPORT_PF; \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) < (esw)->total_vports; (i)++)
+
+#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
+ for ((i) = MLX5_VPORT_FIRST_VF; \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) <= (nvfs); (i)++)
+
+#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
+ for ((i) = (nvfs); \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) >= MLX5_VPORT_FIRST_VF; (i)--)
+
+#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \
+ for ((vport) = MLX5_VPORT_FIRST_VF; \
+ (vport) <= (nvfs); (vport)++)
+
+#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \
+ for ((vport) = (nvfs); \
+ (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
+
+static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ u16 idx = vport_num;
+
+ if (vport_num == MLX5_VPORT_UPLINK)
+ idx = UPLINK_REP_INDEX;
+
+ WARN_ON(idx > esw->total_vports - 1);
+ return &esw->offloads.vport_reps[idx];
+}
+
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
@@ -604,7 +642,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- for (i = 1; i < nvports; i++) {
+ mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) {
MLX5_SET(fte_match_set_misc, misc, source_port, i);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
@@ -622,7 +660,8 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
return 0;
add_flow_err:
- for (i--; i > 0; i--)
+ nvports = --i;
+ mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports)
mlx5_del_flow_rules(flows[i]);
kvfree(flows);
alloc_flows_err:
@@ -637,7 +676,7 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
flows = esw->fdb_table.offloads.peer_miss_rules;
- for (i = 1; i < esw->total_vports; i++)
+ mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev))
mlx5_del_flow_rules(flows[i]);
kvfree(flows);
@@ -1229,9 +1268,7 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
offloads = &esw->offloads;
mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
- for (vport = 0; vport < total_vfs; vport++) {
- rep = &offloads->vport_reps[vport];
-
+ mlx5_esw_for_all_reps(esw, vport, rep) {
rep->vport = vport;
ether_addr_copy(rep->hw_id, hw_id);
}
@@ -1256,12 +1293,10 @@ static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
struct mlx5_eswitch_rep *rep;
int vport;
- for (vport = nvports; vport >= MLX5_VPORT_FIRST_VF; vport--) {
- rep = &esw->offloads.vport_reps[vport];
+ mlx5_esw_for_each_vf_rep_reverse(esw, vport, rep, nvports)
__esw_offloads_unload_rep(esw, rep, rep_type);
- }
- rep = &esw->offloads.vport_reps[UPLINK_REP_INDEX];
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
__esw_offloads_unload_rep(esw, rep, rep_type);
}
@@ -1289,13 +1324,12 @@ static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
int vport;
int err;
- rep = &esw->offloads.vport_reps[UPLINK_REP_INDEX];
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
goto out;
- for (vport = MLX5_VPORT_FIRST_VF; vport <= nvports; vport++) {
- rep = &esw->offloads.vport_reps[vport];
+ mlx5_esw_for_each_vf_rep(esw, vport, rep, nvports) {
err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
goto err_reps;
@@ -1304,7 +1338,7 @@ static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
return 0;
err_reps:
- esw_offloads_unload_reps_type(esw, vport, rep_type);
+ esw_offloads_unload_reps_type(esw, --vport, rep_type);
out:
return err;
}
@@ -1848,10 +1882,9 @@ EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
- struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
- rep = &offloads->vport_reps[UPLINK_REP_INDEX];
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
return rep->rep_if[rep_type].priv;
}
@@ -1859,13 +1892,9 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
int vport,
u8 rep_type)
{
- struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
- if (vport == MLX5_VPORT_UPLINK)
- vport = UPLINK_REP_INDEX;
-
- rep = &offloads->vport_reps[vport];
+ rep = mlx5_eswitch_get_rep(esw, vport);
if (rep->rep_if[rep_type].valid &&
rep->rep_if[rep_type].get_proto_dev)
@@ -1876,13 +1905,13 @@ EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
- return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
+ return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
int vport)
{
- return &esw->offloads.vport_reps[vport];
+ return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);