Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
-rw-r--r--	drivers/infiniband/hw/cxgb4/cm.c	237
1 file changed, 116 insertions(+), 121 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 855ee44fdb52..0dc62b1438be 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,6 +61,10 @@ static char *states[] = {
NULL,
};
+static int dack_mode;
+module_param(dack_mode, int, 0644);
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+
int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
@@ -113,9 +117,9 @@ static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
-static int snd_win = 32 * 1024;
+static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
+MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
static struct workqueue_struct *workq;
@@ -168,7 +172,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
if (error < 0)
kfree_skb(skb);
- return error;
+ return error < 0 ? error : 0;
}
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
@@ -183,7 +187,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
if (error < 0)
kfree_skb(skb);
- return error;
+ return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
@@ -215,12 +219,11 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
- unsigned long flags;
enum c4iw_ep_state state;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->mutex);
state = epc->state;
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->mutex);
return state;
}
@@ -231,12 +234,10 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
- unsigned long flags;
-
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->mutex);
PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
__state_set(epc, new);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->mutex);
return;
}
@@ -247,8 +248,8 @@ static void *alloc_ep(int size, gfp_t gfp)
epc = kzalloc(size, gfp);
if (epc) {
kref_init(&epc->kref);
- spin_lock_init(&epc->lock);
- init_waitqueue_head(&epc->waitq);
+ mutex_init(&epc->mutex);
+ c4iw_init_wr_wait(&epc->wr_wait);
}
PDBG("%s alloc ep %p\n", __func__, epc);
return epc;
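
The conversion above trades the per-endpoint spinlock/waitqueue pair for a mutex plus a small c4iw_wr_wait tracker. A minimal sketch of what that tracker and its initializer presumably look like, inferred only from how ep->com.wr_wait.{done,ret,wait} is used in this patch (the authoritative definitions live in the driver's iw_cxgb4.h):

	/*
	 * Sketch only: field names inferred from the wr_wait usage in this
	 * patch, not the verbatim definition from iw_cxgb4.h.
	 */
	struct c4iw_wr_wait {
		wait_queue_head_t wait;	/* woken by the CPL reply (or an abort) */
		int done;		/* set once a reply has been processed */
		int ret;		/* 0 or -errno handed back to the waiter */
	};

	static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
	{
		wr_waitp->ret = 0;
		wr_waitp->done = 0;
		init_waitqueue_head(&wr_waitp->wait);
	}
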
@@ -469,11 +470,12 @@ static int send_connect(struct c4iw_ep *ep)
__func__);
return -ENOMEM;
}
- set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
opt0 = KEEP_ALIVE(1) |
+ DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
L2T_IDX(ep->l2t->idx) |
@@ -780,11 +782,11 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
}
- if (ep->com.cm_id) {
- PDBG("%s ep %p tid %u status %d\n", __func__, ep,
- ep->hwtid, status);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
+
+ PDBG("%s ep %p tid %u status %d\n", __func__, ep,
+ ep->hwtid, status);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+
if (status < 0) {
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
@@ -845,8 +847,10 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
INIT_TP_WR(req, ep->hwtid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
ep->hwtid));
- req->credit_dack = cpu_to_be32(credits);
- set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
+ req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+ F_RX_DACK_CHANGE |
+ V_RX_DACK_MODE(dack_mode));
+ set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
c4iw_ofld_send(&ep->com.dev->rdev, skb);
return credits;
}
@@ -1124,7 +1128,6 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
- unsigned long flags;
int release = 0;
unsigned int tid = GET_TID(rpl);
struct tid_info *t = dev->rdev.lldi.tids;
@@ -1132,7 +1135,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
BUG_ON(!ep);
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
__state_set(&ep->com, DEAD);
@@ -1143,7 +1146,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
__func__, ep, ep->com.state);
break;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (release)
release_ep_resources(ep);
@@ -1206,9 +1209,9 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
}
PDBG("%s ep %p status %d error %d\n", __func__, ep,
rpl->status, status2errno(rpl->status));
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
+ ep->com.wr_wait.ret = status2errno(rpl->status);
+ ep->com.wr_wait.done = 1;
+ wake_up(&ep->com.wr_wait.wait);
return 0;
}
@@ -1242,9 +1245,9 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = lookup_stid(t, stid);
PDBG("%s ep %p\n", __func__, ep);
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
+ ep->com.wr_wait.ret = status2errno(rpl->status);
+ ep->com.wr_wait.done = 1;
+ wake_up(&ep->com.wr_wait.wait);
return 0;
}
@@ -1264,6 +1267,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
opt0 = KEEP_ALIVE(1) |
+ DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
L2T_IDX(ep->l2t->idx) |
@@ -1287,7 +1291,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
ep->hwtid));
rpl->opt0 = cpu_to_be64(opt0);
rpl->opt2 = cpu_to_be32(opt2);
- set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
return;
@@ -1344,7 +1348,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
u16 rss_qid;
u32 mtu;
int step;
- int txq_idx;
+ int txq_idx, ctrlq_idx;
parent_ep = lookup_stid(t, stid);
PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1365,7 +1369,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
__func__);
goto reject;
}
- dst = &rt->u.dst;
+ dst = &rt->dst;
if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, peer_ip);
BUG_ON(!pdev);
@@ -1376,6 +1380,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
txq_idx = cxgb4_port_idx(pdev) * step;
+ ctrlq_idx = cxgb4_port_idx(pdev);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
dev_put(pdev);
@@ -1387,6 +1392,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
+ ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(dst->neighbour->dev) * step];
@@ -1426,6 +1432,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
child_ep->rss_qid = rss_qid;
child_ep->mtu = mtu;
child_ep->txq_idx = txq_idx;
+ child_ep->ctrlq_idx = ctrlq_idx;
PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
tx_chan, smac_idx, rss_qid);
@@ -1467,20 +1474,17 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_peer_close *hdr = cplhdr(skb);
struct c4iw_ep *ep;
struct c4iw_qp_attributes attrs;
- unsigned long flags;
int disconnect = 1;
int release = 0;
int closing = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(hdr);
- int start_timer = 0;
- int stop_timer = 0;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
dst_confirm(ep->dst);
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case MPA_REQ_WAIT:
__state_set(&ep->com, CLOSING);
@@ -1498,20 +1502,20 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
* in rdma connection migration (see c4iw_accept_cr()).
*/
__state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
+ ep->com.wr_wait.done = 1;
+ ep->com.wr_wait.ret = -ECONNRESET;
PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- wake_up(&ep->com.waitq);
+ wake_up(&ep->com.wr_wait.wait);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
+ ep->com.wr_wait.done = 1;
+ ep->com.wr_wait.ret = -ECONNRESET;
PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- wake_up(&ep->com.waitq);
+ wake_up(&ep->com.wr_wait.wait);
break;
case FPDU_MODE:
- start_timer = 1;
+ start_ep_timer(ep);
__state_set(&ep->com, CLOSING);
closing = 1;
peer_close_upcall(ep);
@@ -1524,7 +1528,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
disconnect = 0;
break;
case MORIBUND:
- stop_timer = 1;
+ stop_ep_timer(ep);
if (ep->com.cm_id && ep->com.qp) {
attrs.next_state = C4IW_QP_STATE_IDLE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1541,16 +1545,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
default:
BUG_ON(1);
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (closing) {
attrs.next_state = C4IW_QP_STATE_CLOSING;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
}
- if (start_timer)
- start_ep_timer(ep);
- if (stop_timer)
- stop_ep_timer(ep);
if (disconnect)
c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
if (release)
@@ -1576,10 +1576,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
- unsigned long flags;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
- int stop_timer = 0;
ep = lookup_tid(t, tid);
if (is_neg_adv_abort(req->status)) {
@@ -1587,41 +1585,34 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
ep->hwtid);
return 0;
}
- spin_lock_irqsave(&ep->com.lock, flags);
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
ep->com.state);
+
+ /*
+ * Wake up any threads in rdma_init() or rdma_fini().
+ */
+ ep->com.wr_wait.done = 1;
+ ep->com.wr_wait.ret = -ECONNRESET;
+ wake_up(&ep->com.wr_wait.wait);
+
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case CONNECTING:
break;
case MPA_REQ_WAIT:
- stop_timer = 1;
+ stop_ep_timer(ep);
break;
case MPA_REQ_SENT:
- stop_timer = 1;
+ stop_ep_timer(ep);
connect_reply_upcall(ep, -ECONNRESET);
break;
case MPA_REP_SENT:
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
break;
case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see c4iw_accept_cr()).
- */
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- wake_up(&ep->com.waitq);
break;
case MORIBUND:
case CLOSING:
- stop_timer = 1;
+ stop_ep_timer(ep);
/*FALLTHROUGH*/
case FPDU_MODE:
if (ep->com.cm_id && ep->com.qp) {
@@ -1640,7 +1631,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case DEAD:
PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
return 0;
default:
BUG_ON(1);
@@ -1651,7 +1642,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
__state_set(&ep->com, DEAD);
release = 1;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
if (!rpl_skb) {
@@ -1667,8 +1658,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
rpl->cmd = CPL_ABORT_NO_RST;
c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
- if (stop_timer)
- stop_ep_timer(ep);
if (release)
release_ep_resources(ep);
return 0;
@@ -1679,11 +1668,9 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_ep *ep;
struct c4iw_qp_attributes attrs;
struct cpl_close_con_rpl *rpl = cplhdr(skb);
- unsigned long flags;
int release = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(rpl);
- int stop_timer = 0;
ep = lookup_tid(t, tid);
@@ -1691,13 +1678,13 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
BUG_ON(!ep);
/* The cm_id may be null if we failed to connect */
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case CLOSING:
__state_set(&ep->com, MORIBUND);
break;
case MORIBUND:
- stop_timer = 1;
+ stop_ep_timer(ep);
if ((ep->com.cm_id) && (ep->com.qp)) {
attrs.next_state = C4IW_QP_STATE_IDLE;
c4iw_modify_qp(ep->com.qp->rhp,
@@ -1716,9 +1703,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
BUG_ON(1);
break;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (stop_timer)
- stop_ep_timer(ep);
+ mutex_unlock(&ep->com.mutex);
if (release)
release_ep_resources(ep);
return 0;
@@ -1726,23 +1711,24 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct c4iw_ep *ep;
- struct cpl_rdma_terminate *term = cplhdr(skb);
+ struct cpl_rdma_terminate *rpl = cplhdr(skb);
struct tid_info *t = dev->rdev.lldi.tids;
- unsigned int tid = GET_TID(term);
+ unsigned int tid = GET_TID(rpl);
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attrs;
ep = lookup_tid(t, tid);
+ BUG_ON(!ep);
- if (state_read(&ep->com) != FPDU_MODE)
- return 0;
+ if (ep->com.qp) {
+ printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
+ ep->com.qp->wq.sq.qid);
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ } else
+ printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- skb_pull(skb, sizeof *term);
- PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
- skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
- skb->len);
- ep->com.qp->attr.terminate_msg_len = skb->len;
- ep->com.qp->attr.is_terminate_local = 0;
return 0;
}
@@ -1763,8 +1749,8 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
if (credits == 0) {
- PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
- __func__, ep, ep->hwtid, state_read(&ep->com));
+ PDBG("%s 0 credit ack ep %p tid %u state %u\n",
+ __func__, ep, ep->hwtid, state_read(&ep->com));
return 0;
}
@@ -1939,7 +1925,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -EHOSTUNREACH;
goto fail3;
}
- ep->dst = &rt->u.dst;
+ ep->dst = &rt->dst;
/* get a l2t entry */
if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
@@ -1957,6 +1943,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->txq_idx = cxgb4_port_idx(pdev) * step;
step = ep->com.dev->rdev.lldi.nrxq /
ep->com.dev->rdev.lldi.nchan;
+ ep->ctrlq_idx = cxgb4_port_idx(pdev);
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(pdev) * step];
dev_put(pdev);
@@ -1971,6 +1958,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
step = ep->com.dev->rdev.lldi.ntxq /
ep->com.dev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
step = ep->com.dev->rdev.lldi.nrxq /
ep->com.dev->rdev.lldi.nchan;
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
@@ -2041,6 +2029,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
}
state_set(&ep->com, LISTEN);
+ c4iw_init_wr_wait(&ep->com.wr_wait);
err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
ep->com.local_addr.sin_addr.s_addr,
ep->com.local_addr.sin_port,
@@ -2049,8 +2038,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail3;
/* wait for pass_open_rpl */
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+ __func__);
if (!err) {
cm_id->provider_data = ep;
goto out;
@@ -2074,15 +2063,14 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
might_sleep();
state_set(&ep->com, DEAD);
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
+ c4iw_init_wr_wait(&ep->com.wr_wait);
err = listen_stop(ep);
if (err)
goto done;
- wait_event(ep->com.waitq, ep->com.rpl_done);
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+ __func__);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
- err = ep->com.rpl_err;
cm_id->rem_ref(cm_id);
c4iw_put_ep(&ep->com);
return err;
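
Both listen paths now funnel through c4iw_wait_for_reply() instead of open-coding wait_event() on rpl_done. Roughly, the helper is expected to sleep until the reply handler (pass_open_rpl(), close_listsrv_rpl() or peer_abort()) marks wr_wait.done and then hand back wr_wait.ret; a hedged sketch of that contract, ignoring the timeout and logging arguments the real helper takes:

	/*
	 * Illustrative contract only; the real helper (in iw_cxgb4.h) also
	 * uses the rdev, hwtid/qpid and caller name it is passed to report
	 * stalled or failed waits.
	 */
	static inline int c4iw_wait_for_reply_sketch(struct c4iw_wr_wait *wr_waitp)
	{
		wait_event(wr_waitp->wait, wr_waitp->done);
		return wr_waitp->ret;	/* status2errno() result from the CPL reply */
	}
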
@@ -2091,14 +2079,11 @@ done:
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
int ret = 0;
- unsigned long flags;
int close = 0;
int fatal = 0;
struct c4iw_rdev *rdev;
- int start_timer = 0;
- int stop_timer = 0;
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
states[ep->com.state], abrupt);
@@ -2120,7 +2105,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
ep->com.state = ABORTING;
else {
ep->com.state = CLOSING;
- start_timer = 1;
+ start_ep_timer(ep);
}
set_bit(CLOSE_SENT, &ep->com.flags);
break;
@@ -2128,7 +2113,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
close = 1;
if (abrupt) {
- stop_timer = 1;
+ stop_ep_timer(ep);
ep->com.state = ABORTING;
} else
ep->com.state = MORIBUND;
@@ -2145,11 +2130,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
break;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (start_timer)
- start_ep_timer(ep);
- if (stop_timer)
- stop_ep_timer(ep);
+ mutex_unlock(&ep->com.mutex);
if (close) {
if (abrupt)
ret = abort_connection(ep, NULL, gfp);
@@ -2163,6 +2144,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
return ret;
}
+static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_fw6_msg *rpl = cplhdr(skb);
+ c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ return 0;
+}
+
/*
* These are the real handlers that are called from a
* work queue.
@@ -2181,7 +2169,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_ABORT_REQ_RSS] = peer_abort,
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
- [CPL_FW4_ACK] = fw4_ack
+ [CPL_FW4_ACK] = fw4_ack,
+ [CPL_FW6_MSG] = async_event
};
static void process_timeout(struct c4iw_ep *ep)
@@ -2189,7 +2178,7 @@ static void process_timeout(struct c4iw_ep *ep)
struct c4iw_qp_attributes attrs;
int abort = 1;
- spin_lock_irq(&ep->com.lock);
+ mutex_lock(&ep->com.mutex);
PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
ep->com.state);
switch (ep->com.state) {
@@ -2216,7 +2205,7 @@ static void process_timeout(struct c4iw_ep *ep)
WARN_ON(1);
abort = 0;
}
- spin_unlock_irq(&ep->com.lock);
+ mutex_unlock(&ep->com.mutex);
if (abort)
abort_connection(ep, NULL, GFP_KERNEL);
c4iw_put_ep(&ep->com);
@@ -2244,7 +2233,7 @@ static void process_work(struct work_struct *work)
{
struct sk_buff *skb = NULL;
struct c4iw_dev *dev;
- struct cpl_act_establish *rpl = cplhdr(skb);
+ struct cpl_act_establish *rpl;
unsigned int opcode;
int ret;
@@ -2300,6 +2289,7 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
"for tid %u\n", rpl->status, GET_TID(rpl));
}
+ kfree_skb(skb);
return 0;
}
@@ -2314,20 +2304,25 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
switch (rpl->type) {
case 1:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
- wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+ wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
if (wr_waitp) {
- wr_waitp->ret = ret;
+ if (ret)
+ wr_waitp->ret = -ret;
+ else
+ wr_waitp->ret = 0;
wr_waitp->done = 1;
wake_up(&wr_waitp->wait);
}
+ kfree_skb(skb);
break;
case 2:
- c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ sched(dev, skb);
break;
default:
printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
rpl->type);
+ kfree_skb(skb);
break;
}
return 0;
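
One consequence of the rework above is that fw6_msg() now owns skb disposal for type 1 and unknown messages (kfree_skb()), while type 2 async events are handed to sched() so that c4iw_ev_dispatch() runs from the work queue, via the new async_event() handler, rather than in the receive path. A rough sketch of that deferral; the rxq and skb_work names are assumptions for illustration, only workq appears in this diff:

	/* Assumed: a deferred-rx queue and the work item driving process_work(). */
	static struct sk_buff_head rxq;			/* skb_queue_head_init() at module init */
	static DECLARE_WORK(skb_work, process_work);

	static int sched_sketch(struct c4iw_dev *dev, struct sk_buff *skb)
	{
		/* stash the owning device so process_work() can recover it */
		*((struct c4iw_dev **)skb->cb) = dev;
		skb_queue_tail(&rxq, skb);		/* defer the message */
		queue_work(workq, &skb_work);		/* process_work() dispatches by CPL opcode */
		return 0;
	}
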