Diffstat (limited to 'drivers/staging/rdma')
-rw-r--r--  drivers/staging/rdma/hfi1/qp.c    | 79
-rw-r--r--  drivers/staging/rdma/hfi1/qp.h    | 37
-rw-r--r--  drivers/staging/rdma/hfi1/rc.c    | 44
-rw-r--r--  drivers/staging/rdma/hfi1/ruc.c   | 40
-rw-r--r--  drivers/staging/rdma/hfi1/uc.c    | 21
-rw-r--r--  drivers/staging/rdma/hfi1/ud.c    | 22
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.c |  3
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.h |  2
8 files changed, 128 insertions(+), 120 deletions(-)
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index ec9ee726267b..00866c07fddc 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -226,16 +226,45 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
}
}
-int hfi1_check_send_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
+/**
+ * hfi1_check_send_wqe - validate wqe
+ * @qp: The qp
+ * @wqe: The built wqe
+ *
+ * Validate the wqe.  This is called
+ * prior to inserting the wqe into
+ * the ring, but after the wqe has
+ * been set up.
+ *
+ * Returns 0 on success, -EINVAL on failure
+ *
+ */
+int hfi1_check_send_wqe(struct rvt_qp *qp,
+			struct rvt_swqe *wqe)
 {
 	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
-	struct rvt_ah *ah = ibah_to_rvtah(ud_wr(wr)->ah);
+	struct rvt_ah *ah;
 
-	if (qp->ibqp.qp_type != IB_QPT_RC &&
-	    qp->ibqp.qp_type != IB_QPT_UC &&
-	    qp->ibqp.qp_type != IB_QPT_SMI &&
-	    ibp->sl_to_sc[ah->attr.sl] == 0xf) {
-		return -EINVAL;
+	switch (qp->ibqp.qp_type) {
+	case IB_QPT_RC:
+	case IB_QPT_UC:
+		if (wqe->length > 0x80000000U)
+			return -EINVAL;
+		break;
+	case IB_QPT_SMI:
+		ah = ibah_to_rvtah(wqe->ud_wr.ah);
+		if (wqe->length > (1 << ah->log_pmtu))
+			return -EINVAL;
+		break;
+	case IB_QPT_GSI:
+	case IB_QPT_UD:
+		ah = ibah_to_rvtah(wqe->ud_wr.ah);
+		if (wqe->length > (1 << ah->log_pmtu))
+			return -EINVAL;
+		if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
+			return -EINVAL;
+		/* FALLTHROUGH */
+	default:
+		break;
 	}
return 0;
}
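
The new hook is invoked from rdmavt's post-send path once a wqe has been fully built but before it is made visible to the send engine. A minimal sketch of that caller side, assuming the driver_f hook table of this rdmavt generation (the helper name post_one_send_check() is hypothetical, not part of this patch):

	/*
	 * Sketch (assumed): validate a built wqe via the driver hook
	 * before advancing the ring; a non-zero return fails the post.
	 */
	static int post_one_send_check(struct rvt_qp *qp, struct rvt_swqe *wqe)
	{
		struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

		if (rdi->driver_f.check_send_wqe)
			return rdi->driver_f.check_send_wqe(qp, wqe);
		return 0;
	}

On -EINVAL the wqe is discarded without ever being seen by hfi1_make_rc_req() and friends.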
@@ -302,6 +331,42 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp)
}
/**
+ * _hfi1_schedule_send - schedule progress
+ * @qp: the QP
+ *
+ * This schedules qp progress w/o regard to the s_flags.
+ *
+ * It is only used in the post send, which doesn't hold
+ * the s_lock.
+ */
+void _hfi1_schedule_send(struct rvt_qp *qp)
+{
+	struct hfi1_qp_priv *priv = qp->priv;
+	struct hfi1_ibport *ibp =
+		to_iport(qp->ibqp.device, qp->port_num);
+	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+
+	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
+			priv->s_sde ?
+			priv->s_sde->cpu :
+			cpumask_first(cpumask_of_node(dd->node)));
+}
+
+/**
+ * hfi1_schedule_send - schedule progress
+ * @qp: the QP
+ *
+ * This schedules qp progress; the caller
+ * should hold the s_lock.
+ */
+void hfi1_schedule_send(struct rvt_qp *qp)
+{
+	if (hfi1_send_ok(qp))
+		_hfi1_schedule_send(qp);
+}
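
The split between the two functions mirrors the locking rules. An illustrative usage fragment (assumed callers; not part of this patch):

	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags &= ~RVT_S_BUSY;
	hfi1_schedule_send(qp);		/* checks hfi1_send_ok() under s_lock */
	spin_unlock_irqrestore(&qp->s_lock, flags);

	_hfi1_schedule_send(qp);	/* post-send side only: no s_lock held */

Making both out-of-line (rather than static inlines in qp.h) also lets rdmavt take them as the schedule_send and schedule_send_no_lock function pointers registered in the verbs.c hunk below.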
+
+/**
* hfi1_get_credit - flush the send work queue of a QP
* @qp: the qp who's send work queue to flush
* @aeth: the Acknowledge Extended Transport Header
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h
index 973c14b5268a..98827b5dd2a1 100644
--- a/drivers/staging/rdma/hfi1/qp.h
+++ b/drivers/staging/rdma/hfi1/qp.h
@@ -137,41 +137,8 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
*/
void qp_comm_est(struct rvt_qp *qp);
-/**
- * _hfi1_schedule_send - schedule progress
- * @qp: the QP
- *
- * This schedules qp progress w/o regard to the s_flags.
- *
- * It is only used in the post send, which doesn't hold
- * the s_lock.
- */
-static inline void _hfi1_schedule_send(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
-
- iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
- priv->s_sde ?
- priv->s_sde->cpu :
- cpumask_first(cpumask_of_node(dd->node)));
-}
-
-/**
- * hfi1_schedule_send - schedule progress
- * @qp: the QP
- *
- * This schedules qp progress and caller should hold
- * the s_lock.
- */
-static inline void hfi1_schedule_send(struct rvt_qp *qp)
-{
- if (hfi1_send_ok(qp))
- _hfi1_schedule_send(qp);
-}
+void _hfi1_schedule_send(struct rvt_qp *qp);
+void hfi1_schedule_send(struct rvt_qp *qp);
void hfi1_migrate_qp(struct rvt_qp *qp);
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c
index a4a44d33d857..a62c9424fa86 100644
--- a/drivers/staging/rdma/hfi1/rc.c
+++ b/drivers/staging/rdma/hfi1/rc.c
@@ -367,6 +367,8 @@ bail:
* hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
* @qp: a pointer to the QP
*
+ * Assumes s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
int hfi1_make_rc_req(struct rvt_qp *qp)
@@ -383,7 +385,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
u32 bth2;
u32 pmtu = qp->pmtu;
char newreq;
- unsigned long flags;
int ret = 0;
int middle = 0;
int delta;
@@ -392,12 +393,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
ohdr = &priv->s_hdr->ibh.u.l.oth;
- /*
- * The lock is needed to synchronize between the sending tasklet,
- * the receive interrupt handler, and timeout re-sends.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
-
/* Sending responses has higher priority over sending requests. */
if ((qp->s_flags & RVT_S_RESP_PENDING) &&
make_rc_ack(dev, qp, ohdr, pmtu))
@@ -407,7 +402,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_iowait.sdma_busy)) {
@@ -463,8 +459,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
qp->s_flags |= RVT_S_WAIT_FENCE;
goto bail;
}
- wqe->psn = qp->s_next_psn;
newreq = 1;
+ qp->s_psn = wqe->psn;
}
/*
* Note that we have to be careful not to modify the
@@ -483,9 +479,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
- wqe->lpsn = wqe->psn;
if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
qp->s_state = OP(SEND_FIRST);
len = pmtu;
break;
@@ -522,9 +516,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
hwords += sizeof(struct ib_reth) / sizeof(u32);
- wqe->lpsn = wqe->psn;
if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
qp->s_state = OP(RDMA_WRITE_FIRST);
len = pmtu;
break;
@@ -559,13 +551,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
qp->s_num_rd_atomic++;
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- /*
- * Adjust s_next_psn to count the
- * expected number of responses.
- */
- if (len > pmtu)
- qp->s_next_psn += (len - 1) / pmtu;
- wqe->lpsn = qp->s_next_psn++;
}
ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->rdma_wr.remote_addr);
@@ -596,7 +581,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
qp->s_num_rd_atomic++;
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- wqe->lpsn = wqe->psn;
}
if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
qp->s_state = OP(COMPARE_SWAP);
@@ -639,11 +623,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
}
if (wqe->wr.opcode == IB_WR_RDMA_READ)
qp->s_psn = wqe->lpsn + 1;
- else {
+ else
qp->s_psn++;
- if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- }
break;
case OP(RDMA_READ_RESPONSE_FIRST):
@@ -663,8 +644,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
/* FALLTHROUGH */
case OP(SEND_MIDDLE):
bth2 = mask_psn(qp->s_psn++);
- if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
len = qp->s_len;
if (len > pmtu) {
@@ -705,8 +684,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
/* FALLTHROUGH */
case OP(RDMA_WRITE_MIDDLE):
bth2 = mask_psn(qp->s_psn++);
- if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
len = qp->s_len;
if (len > pmtu) {
@@ -777,13 +754,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp)
bth2,
middle);
done:
- ret = 1;
- goto unlock;
-
+ return 1;
bail:
qp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
@@ -1563,7 +1536,8 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
trace_hfi1_rc_ack(qp, psn);
/* Ignore invalid responses. */
- if (cmp_psn(psn, qp->s_next_psn) >= 0)
+ smp_read_barrier_depends(); /* see post_one_send */
+ if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
/* Ignore duplicate responses. */
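
The ACCESS_ONCE()/smp_read_barrier_depends() pairs above read indices that post_one_send() now publishes without the send engine holding the same lock. A self-contained userspace sketch of the underlying single-producer/single-consumer idiom, using C11 atomics in place of the kernel macros (all names hypothetical):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct sq {
		_Atomic unsigned int head;	/* written by the posting thread */
		unsigned int last;		/* owned by the send engine */
	};

	/* Producer: fill the slot first, then publish it by advancing
	 * head with release semantics.
	 */
	static void sq_publish(struct sq *q, unsigned int next)
	{
		atomic_store_explicit(&q->head, next, memory_order_release);
	}

	/* Consumer: the equivalent of
	 *	if (qp->s_last == ACCESS_ONCE(qp->s_head))
	 * a single, torn-free read; acquire pairs with the release above.
	 */
	static bool sq_empty(struct sq *q)
	{
		return q->last == atomic_load_explicit(&q->head,
						       memory_order_acquire);
	}

Seeing a stale head is harmless here: the engine simply treats the queue as empty and gets rescheduled once the posting side calls schedule_send.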
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c
index a7add3c5d0f2..6114550bb73f 100644
--- a/drivers/staging/rdma/hfi1/ruc.c
+++ b/drivers/staging/rdma/hfi1/ruc.c
@@ -392,7 +392,8 @@ static void ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- if (sqp->s_last == sqp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
@@ -871,40 +872,43 @@ void hfi1_do_send(struct rvt_qp *qp)
 	qp->s_flags |= RVT_S_BUSY;
 
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
 	timeout = jiffies + (timeout_int) / 8;
 	cpu = priv->s_sde ? priv->s_sde->cpu :
 		cpumask_first(cpumask_of_node(ps.ppd->dd->node));
 	do {
 		/* Check for a constructed packet to be sent. */
 		if (qp->s_hdrwords != 0) {
+			spin_unlock_irqrestore(&qp->s_lock, flags);
 			/*
 			 * If the packet cannot be sent now, return and
 			 * the send tasklet will be woken up later.
 			 */
 			if (hfi1_verbs_send(qp, &ps))
-				break;
+				return;
 			/* Record that s_hdr is empty. */
 			qp->s_hdrwords = 0;
-		}
-
-		/* allow other tasks to run */
-		if (unlikely(time_after(jiffies, timeout))) {
-			if (workqueue_congested(cpu, ps.ppd->hfi1_wq)) {
-				spin_lock_irqsave(&qp->s_lock, flags);
-				qp->s_flags &= ~RVT_S_BUSY;
-				hfi1_schedule_send(qp);
-				spin_unlock_irqrestore(&qp->s_lock,
-						       flags);
+			/* allow other tasks to run */
+			if (unlikely(time_after(jiffies, timeout))) {
+				if (workqueue_congested(cpu,
+							ps.ppd->hfi1_wq)) {
+					spin_lock_irqsave(&qp->s_lock, flags);
+					qp->s_flags &= ~RVT_S_BUSY;
+					hfi1_schedule_send(qp);
+					spin_unlock_irqrestore(&qp->s_lock,
+							       flags);
+					this_cpu_inc(
+						*ps.ppd->dd->send_schedule);
+					return;
+				}
+				cond_resched();
 				this_cpu_inc(*ps.ppd->dd->send_schedule);
-				return;
+				timeout = jiffies + (timeout_int) / 8;
 			}
-			cond_resched();
-			this_cpu_inc(*ps.ppd->dd->send_schedule);
-			timeout = jiffies + (timeout_int) / 8;
+			spin_lock_irqsave(&qp->s_lock, flags);
 		}
 	} while (make_req(qp));
+
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 }
/*
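
The reshaped loop above inverts the old locking: make_req() (hfi1_make_rc_req() and friends) is now entered with s_lock held, and the lock is dropped only around the actual hfi1_verbs_send(). A simplified skeleton of the resulting control flow (illustrative; timeout and congestion handling elided):

	spin_lock_irqsave(&qp->s_lock, flags);
	do {
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			if (hfi1_verbs_send(qp, &ps))
				return;			/* rescheduled later */
			qp->s_hdrwords = 0;		/* s_hdr is now empty */
			spin_lock_irqsave(&qp->s_lock, flags);
		}
	} while (make_req(qp));
	spin_unlock_irqrestore(&qp->s_lock, flags);

This is why the spin_lock_irqsave()/spin_unlock_irqrestore() pairs disappear from hfi1_make_rc_req() above and from hfi1_make_uc_req() and hfi1_make_ud_req() below.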
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c
index 0aa604b7557b..f884b5c8051b 100644
--- a/drivers/staging/rdma/hfi1/uc.c
+++ b/drivers/staging/rdma/hfi1/uc.c
@@ -59,6 +59,8 @@
* hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
*
+ * Assumes s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
int hfi1_make_uc_req(struct rvt_qp *qp)
@@ -66,7 +68,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp)
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_other_headers *ohdr;
struct rvt_swqe *wqe;
- unsigned long flags;
u32 hwords = 5;
u32 bth0 = 0;
u32 len;
@@ -74,13 +75,12 @@ int hfi1_make_uc_req(struct rvt_qp *qp)
int ret = 0;
int middle = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
-
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_iowait.sdma_busy)) {
@@ -106,15 +106,15 @@ int hfi1_make_uc_req(struct rvt_qp *qp)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- if (qp->s_cur == qp->s_head) {
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
}
/*
* Start a new request.
*/
- wqe->psn = qp->s_next_psn;
- qp->s_psn = qp->s_next_psn;
+ qp->s_psn = wqe->psn;
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
qp->s_sge.num_sge = wqe->wr.num_sge;
@@ -235,15 +235,12 @@ int hfi1_make_uc_req(struct rvt_qp *qp)
qp->s_cur_sge = &qp->s_sge;
qp->s_cur_size = len;
hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- mask_psn(qp->s_next_psn++), middle);
+ mask_psn(qp->s_psn++), middle);
done:
- ret = 1;
- goto unlock;
+ return 1;
bail:
qp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
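
In both the RC and UC builders, the wqe now arrives with its psn already assigned (qp->s_psn = wqe->psn) instead of being stamped from s_next_psn under the lock. A sketch of the assumed post-side counterpart that makes this work (hypothetical helper; the real logic lives in rdmavt's post path):

	/* Assumed (illustrative): stamp the wqe's psn range at post time,
	 * so the send engine only ever reads wqe->psn and wqe->lpsn.
	 */
	static void stamp_psn(struct rvt_qp *qp, struct rvt_swqe *wqe,
			      u8 log_pmtu)
	{
		wqe->psn = qp->s_next_psn;
		wqe->lpsn = wqe->psn +
			(wqe->length ?
			 (wqe->length - 1) >> log_pmtu : 0);
		qp->s_next_psn = wqe->lpsn + 1;
	}

This also explains the deleted s_next_psn adjustments in the RC RDMA READ and atomic cases: the expected response count is folded into lpsn before the wqe is ever visible to the engine.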
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c
index fdf6e3bee8f1..ba78e2e3e0bb 100644
--- a/drivers/staging/rdma/hfi1/ud.c
+++ b/drivers/staging/rdma/hfi1/ud.c
@@ -261,6 +261,8 @@ drop:
* hfi1_make_ud_req - construct a UD request packet
* @qp: the QP
*
+ * Assumes s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
int hfi1_make_ud_req(struct rvt_qp *qp)
@@ -271,7 +273,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
struct hfi1_pportdata *ppd;
struct hfi1_ibport *ibp;
struct rvt_swqe *wqe;
- unsigned long flags;
u32 nwords;
u32 extra_bytes;
u32 bth0;
@@ -281,13 +282,12 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
int next_cur;
u8 sc5;
- spin_lock_irqsave(&qp->s_lock, flags);
-
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_iowait.sdma_busy)) {
@@ -299,7 +299,9 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
goto done;
}
- if (qp->s_cur == qp->s_head)
+ /* see post_one_send() */
+ smp_read_barrier_depends();
+ if (qp->s_cur == ACCESS_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
@@ -317,6 +319,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
if (unlikely(!loopback && (lid == ppd->lid ||
(lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
qp->ibqp.qp_type == IB_QPT_GSI)))) {
+ unsigned long flags;
/*
* If DMAs are in progress, we can't generate
* a completion for the loopback packet since
@@ -329,6 +332,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
goto bail;
}
qp->s_cur = next_cur;
+ local_irq_save(flags);
spin_unlock_irqrestore(&qp->s_lock, flags);
ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, flags);
@@ -408,7 +412,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
- ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++));
+ ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
/*
* Qkeys with the high order bit set mean use the
* qkey from the QP context instead of the WR (see 10.2.5).
@@ -423,13 +427,9 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
priv->s_hdr->sde = NULL;
done:
- ret = 1;
- goto unlock;
-
+ return 1;
bail:
qp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
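
The local_irq_save()/spin_unlock_irqrestore() pair added to the loopback path is subtle: the caller entered with s_lock held via spin_lock_irqsave(), so saving the (already disabled) interrupt state and unlocking with that state releases only the lock while interrupts stay off across ud_loopback(). Annotated fragment (comments illustrative):

	unsigned long flags;

	local_irq_save(flags);			/* irqs are already off here */
	spin_unlock_irqrestore(&qp->s_lock, flags);	/* drop lock, irqs stay off */
	ud_loopback(qp, wqe);			/* runs without s_lock */
	spin_lock_irqsave(&qp->s_lock, flags);	/* retake for the caller */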
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 35f6d92a6249..1df464815247 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -1533,6 +1533,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
+ dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
@@ -1543,7 +1544,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
- dd->verbs_dev.rdi.driver_f.check_send_wr = hfi1_check_send_wr;
+ dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
/* completion queue */
snprintf(dd->verbs_dev.rdi.dparms.cq_name,
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
index adb63bb6fae2..d00c55d06c8c 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/staging/rdma/hfi1/verbs.h
@@ -427,7 +427,7 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
-int hfi1_check_send_wr(struct rvt_qp *qp, struct ib_send_wr *wr);
+int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
int has_grh, struct rvt_qp *qp, u32 bth0);