author	Tariq Toukan	2018-03-21 15:31:08 +0100
committer	Saeed Mahameed	2018-05-25 23:11:00 +0200
commit	043dc78ecf07f3fc5b87270518d7f322aea2f748 (patch)
tree	8f239559368d60085731b7da57b8256caa0ffe55	/drivers/net/ethernet/mellanox/mlx5/core/en.h
parent	net/mlx5e: Use WQ API functions instead of direct fields access (diff)
net/mlx5e: TX, Use actual WQE size for SQ edge fill
We fill the SQ edge with NOPs to avoid WQE wrap-around. Here, instead of
doing that in advance for the maximum possible WQE size, we do it
on-demand, using the actual WQE size. We re-order some parts in
mlx5e_sq_xmit to finish the calculation of the WQE size (ds_cnt) before
doing any writes to the WQE buffer.

When the SQ work queue is fragmented (introduced in a downstream patch),
dealing with WQE wraps becomes more frequent. This change drastically
reduces the overhead in that case.

Performance tests:
ConnectX-5 100Gbps, CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz
Packet rate of 64B packets, single transmit ring, size 8K.

Before: 14.9 Mpps
After:  15.8 Mpps

Improvement of 6%.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
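The idea is small enough to simulate in user space. Below is a minimal
sketch of the on-demand edge fill described above, assuming a ring of
WQ_SIZE basic blocks (WQEBBs) and a free-running producer counter; all
names here (post_wqe, post_nop, WQ_SIZE, pc) are illustrative, while the
real logic lives in mlx5e_sq_xmit() and mlx5e_post_nop():

/*
 * User-space sketch: NOPs are posted only when the WQE, at its
 * actual size, would wrap past the end of the ring.
 * Assumes num_wqebbs <= WQ_SIZE.
 */
#include <stdio.h>

#define WQ_SIZE 8 /* ring size in WQEBBs, power of two */

static unsigned int pc; /* producer counter; pc & (WQ_SIZE - 1) is the index */

static void post_nop(void)
{
	printf("NOP at index %u\n", pc & (WQ_SIZE - 1));
	pc++; /* a NOP consumes exactly one WQEBB */
}

static void post_wqe(unsigned int num_wqebbs)
{
	unsigned int pi = pc & (WQ_SIZE - 1);

	/* on-demand edge fill, based on the actual WQE size */
	while (pi + num_wqebbs > WQ_SIZE) {
		post_nop();
		pi = pc & (WQ_SIZE - 1);
	}

	printf("WQE at index %u, %u WQEBBs\n", pi, num_wqebbs);
	pc += num_wqebbs;
}

int main(void)
{
	post_wqe(4);
	post_wqe(2);
	post_wqe(3); /* posts two NOPs (indices 6, 7), then lands at index 0 */
	return 0;
}

This also explains the two "- u16 edge;" deletions in the diff below: a
precomputed edge derived from the maximum WQE size is no longer needed
once padding is decided per-WQE.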
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en.h')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en.h	3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 061c4e90692e..3c0f0a0343fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -183,6 +183,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
+ struct mlx5_wqe_data_seg data[0];
};
struct mlx5e_rx_wqe {
@@ -374,7 +375,6 @@ struct mlx5e_txqsq {
struct netdev_queue *txq;
u32 sqn;
u8 min_inline_mode;
- u16 edge;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
@@ -439,7 +439,6 @@ struct mlx5e_icosq {
struct mlx5_wq_cyc wq;
void __iomem *uar_map;
u32 sqn;
- u16 edge;
unsigned long state;
/* control path */
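The new data[0] flexible tail in mlx5e_tx_wqe makes the data segments
addressable directly behind the ctrl and eth segments, so the WQE size in
16-byte data segments (ds_cnt) can be computed in full before anything is
written to the WQE buffer. A rough sketch of that calculation; the two
constants match the mlx5 headers (MLX5_SEND_WQE_DS = 16 bytes,
MLX5_SEND_WQEBB_NUM_DS = 4 segments per 64-byte basic block), but the
helper itself is an illustrative assumption, not driver code:

#define MLX5_SEND_WQE_DS       16 /* one data segment is 16 bytes */
#define MLX5_SEND_WQEBB_NUM_DS  4 /* 64-byte basic block = 4 segments */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* size of a TX WQE in basic blocks, known before any WQE writes */
static unsigned int tx_wqe_num_wqebbs(unsigned int hdr_bytes,
				      unsigned int num_dma_frags)
{
	unsigned int ds_cnt;

	/* ctrl + eth (+ inline headers), rounded up to whole segments */
	ds_cnt  = DIV_ROUND_UP(hdr_bytes, MLX5_SEND_WQE_DS);
	/* one data segment per DMA-mapped fragment */
	ds_cnt += num_dma_frags;

	return DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
}

With num_wqebbs known up front, the edge check from the sketch above can
run before the first write to the WQE, which is exactly the re-ordering
the commit message describes for mlx5e_sq_xmit.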