diff options
author | Mike Marciniszyn | 2015-11-10 01:13:58 +0100 |
---|---|---|
committer | Greg Kroah-Hartman | 2015-11-20 01:55:37 +0100 |
commit | 0a226edd203f1209e4ee6e07a6b41a9cfd8beeb8 (patch) | |
tree | 61ec5424ebca8146ea7e832761fefc875c1f6f76 /drivers/staging/rdma/hfi1/qp.h | |
parent | staging/rdma/hfi1: move hfi1_migrate_qp (diff) | |
download | kernel-qcow2-linux-0a226edd203f1209e4ee6e07a6b41a9cfd8beeb8.tar.gz kernel-qcow2-linux-0a226edd203f1209e4ee6e07a6b41a9cfd8beeb8.tar.xz kernel-qcow2-linux-0a226edd203f1209e4ee6e07a6b41a9cfd8beeb8.zip |
staging/rdma/hfi1: Use parallel workqueue for SDMA engines
The workqueue is currently single-threaded per port, which is OK for a small
number of SDMA engines.
For hfi1, there are up to 16 SDMA engines that can be fed descriptors in
parallel.
Use alloc_workqueue with a workqueue limit equal to the number of SDMA engines
and with WQ_CPU_INTENSIVE and WQ_HIGHPRI specified.
Then change send to use the new scheduler, which no longer needs to take the
s_lock.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/staging/rdma/hfi1/qp.h')
-rw-r--r-- | drivers/staging/rdma/hfi1/qp.h | 35 |
1 files changed, 35 insertions, 0 deletions
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index bacfa9c5e8a8..e49cfa6e59e0 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -247,6 +247,41 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter); */ void qp_comm_est(struct hfi1_qp *qp); +/** + * _hfi1_schedule_send - schedule progress + * @qp: the QP + * + * This schedules qp progress w/o regard to the s_flags. + * + * It is only used in the post send, which doesn't hold + * the s_lock. + */ +static inline void _hfi1_schedule_send(struct hfi1_qp *qp) +{ + struct hfi1_ibport *ibp = + to_iport(qp->ibqp.device, qp->port_num); + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); + struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + + iowait_schedule(&qp->s_iowait, ppd->hfi1_wq, + qp->s_sde ? + qp->s_sde->cpu : + cpumask_first(cpumask_of_node(dd->assigned_node_id))); +} + +/** + * hfi1_schedule_send - schedule progress + * @qp: the QP + * + * This schedules qp progress and caller should hold + * the s_lock. + */ +static inline void hfi1_schedule_send(struct hfi1_qp *qp) +{ + if (hfi1_send_ok(qp)) + _hfi1_schedule_send(qp); +} + void hfi1_migrate_qp(struct hfi1_qp *qp); #endif /* _QP_H */ |