author    Linus Torvalds  2017-05-03 21:45:55 +0200
committer Linus Torvalds  2017-05-03 21:45:55 +0200
commit    1684096b1ed813f621fb6cbd06e72235c1c2a0ca (patch)
tree      13a228c35d6344f5d23b2c195aa3b026e42aac4b /drivers/infiniband/hw/cxgb4
parent    Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dto... (diff)
parent    infiniband: avoid dereferencing uninitialized dst on error path (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma updates from Doug Ledford:
"More exhaustive description of primary updates in this release:

 - Lots of driver fixes and misc fixes across the board.

 - I had to base on a net-next tree because the IPoIB Accelerator patches needed it. Unfortunately, it was known to Mellanox that there would need to be an IPoIB accelerator patch to the net tree (which left some functions turned off by an #ifdef construct to avoid warnings about defined but unused functions), then one to the RDMA tree, then a fixup that went back and re-enabled the functions in the net tree and enabled their use in the rdma tree. Also, a sparse fix was sent to the net tree after I did my pull, and the fixup patch conflicts quite directly with that sparse fix, so I'm going to submit the fixup patch towards the end of the merge window by itself and based upon your master branch at the time.

 - Two separate rounds of hfi1 fixes: one that got dropped from the last release because it came in just a day or two before the end of the merge window, and then the one from this release cycle. Of note is that I now have a third series that just landed from Intel yesterday. It is not included in this pull request, but I may submit it by the end of the week. I'll talk to Intel about improving the timing of their submissions for my workflow.

 - Changes to our idr usage in the RDMA subsystem that will tie into our cgroup management and also into the upcoming changes for the RDMA kernel<->userspace API.

 - Addition of support for a netdev to be tied to an RDMA device at the core level.

 - Addition of the VNIC driver from Intel. While IPoIB provides IP over InfiniBand (and *only* IP; no lower layer protocol headers are allowed or supported), the VNIC driver presents a virtual Ethernet device with support for things like varying Ethertypes, VLANs, priorities and other features of Ethernet. The virtual devices are centrally managed by the OPA fabric manager, making this (for the time being) a strictly OPA specific feature.

 - Improvements to the On-Demand Paging support in the RDMA subsystem.

 - Addition of three significant OPA changes. While we added OPA support some time ago (via the hfi1 driver), the RDMA subsystem has so far glossed over the areas where OPA and InfiniBand differ. With this release we are starting to add support for the OPA extensions into the RDMA core in the following areas: extended port information for OPA is now supported, extended Address Handle attributes for OPA are now supported, and extended SA queries to get OPA-specific subnet information are now supported.
Concise summary from the tag:

 - idr usage and locking changes
 - build fix for hns
 - ipoib debug path record file fix
 - hfi1 updates
 - core RDMA netdev addition
 - Intel VNIC driver addition
 - Enhanced accelerators for IPoIB addition
 - Debug cleanups in cxgb3/4
 - Trivial cleanups from SF Markus Elfring
 - Misc rxe fixes from Mellanox
 - Misc ipoib fixes from Mellanox
 - Lots of mlx4/mlx5 changes from Mellanox
 - Misc fixes across the RDMA subsystem
 - ODP paging fixes and improvements
 - qedr updates
 - hfi1 updates
 - OPA port info patches
 - OPA AH patches
 - OPA SA Query patches"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (191 commits)
  infiniband: avoid dereferencing uninitialized dst on error path
  IB/SA: Add OPA addr header
  IB/mlx5: Add port_xmit_wait to counter registers read
  IB/ocrdma: fix out of bounds access to local buffer
  IB/mlx4: Fix incorrect order of formal and actual parameters
  IB/mlx4: Change flush logic so it adheres to the variable name
  mlx5: Fix mlx5_ib_map_mr_sg mr length
  IB/rxe: Don't clamp residual length to mtu
  IB/SA: Add support to query OPA path records
  IB/SA: Add OPA path record type
  IB/SA: Split struct sa_path_rec based on IB and ROCE specific fields
  IB/SA: Introduce path record specific types
  IB/SA: Rename ib_sa_path_rec to sa_path_rec
  IB/CM: Add braces when using sizeof
  IB/core: Define 'opa' rdma_ah_attr type
  IB/core: Define 'ib' and 'roce' rdma_ah_attr types
  IB/core: Use rdma_ah_attr accessor functions
  IB/core: Add accessor functions for rdma_ah_attr fields
  IB/PVRDMA: Rename ib_ah_attr related functions
  IB/mthca: Rename to_ib_ah_attr to to_rdma_ah_attr
  ...
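The "Debug cleanups in cxgb3/4" item above is what most of the cxgb4 diff below consists of: the driver-private PDBG()/MOD logging macros are replaced with the kernel's standard pr_debug()/pr_err()/pr_warn() helpers. A minimal sketch of the target idiom, for orientation only (the helper function and its names are hypothetical, not lifted from this diff):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* prefix for every pr_*() call below */

#include <linux/printk.h>

/* Hypothetical endpoint-event helper showing the converted style. */
static void example_ep_event(void *ep, unsigned int hwtid)
{
	/*
	 * pr_debug() compiles away unless DEBUG or CONFIG_DYNAMIC_DEBUG is
	 * set; with dynamic debug it is toggled at runtime, e.g.:
	 *   echo 'module iw_cxgb4 +p' > /sys/kernel/debug/dynamic_debug/control
	 * which is why the c4iw_debug module parameter is marked "obsolete".
	 */
	pr_debug("%s ep %p tid %u\n", __func__, ep, hwtid);

	if (!ep)
		pr_err("%s - no endpoint for tid %u\n", __func__, hwtid);
}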
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c        393
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c         79
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c    141
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c         39
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h   48
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c        44
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c   44
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c         96
-rw-r--r--  drivers/infiniband/hw/cxgb4/resource.c   64
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h         24
10 files changed, 474 insertions, 498 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 03a1b0e64fc3..b6fe45924c6e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -101,7 +101,7 @@ MODULE_PARM_DESC(enable_tcp_window_scaling,
int c4iw_debug;
module_param(c4iw_debug, int, 0644);
-MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
+MODULE_PARM_DESC(c4iw_debug, "obsolete");
static int peer2peer = 1;
module_param(peer2peer, int, 0644);
@@ -180,7 +180,7 @@ static void ref_qp(struct c4iw_ep *ep)
static void start_ep_timer(struct c4iw_ep *ep)
{
- PDBG("%s ep %p\n", __func__, ep);
+ pr_debug("%s ep %p\n", __func__, ep);
if (timer_pending(&ep->timer)) {
pr_err("%s timer already started! ep %p\n",
__func__, ep);
@@ -196,7 +196,7 @@ static void start_ep_timer(struct c4iw_ep *ep)
static int stop_ep_timer(struct c4iw_ep *ep)
{
- PDBG("%s ep %p stopping\n", __func__, ep);
+ pr_debug("%s ep %p stopping\n", __func__, ep);
del_timer_sync(&ep->timer);
if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
c4iw_put_ep(&ep->com);
@@ -212,7 +212,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- PDBG("%s - device in error state - dropping\n", __func__);
+ pr_debug("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
@@ -229,7 +229,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- PDBG("%s - device in error state - dropping\n", __func__);
+ pr_debug("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
@@ -263,10 +263,10 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
- PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- TCPOPT_MSS_G(opt), ep->mss, ep->emss);
- PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
- ep->mss, ep->emss);
+ pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
+ ep->mss, ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
@@ -287,7 +287,7 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
mutex_lock(&epc->mutex);
- PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+ pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
__state_set(epc, new);
mutex_unlock(&epc->mutex);
return;
@@ -322,7 +322,7 @@ static void *alloc_ep(int size, gfp_t gfp)
mutex_init(&epc->mutex);
c4iw_init_wr_wait(&epc->wr_wait);
}
- PDBG("%s alloc ep %p\n", __func__, epc);
+ pr_debug("%s alloc ep %p\n", __func__, epc);
return epc;
}
@@ -384,7 +384,7 @@ void _c4iw_free_ep(struct kref *kref)
struct c4iw_ep *ep;
ep = container_of(kref, struct c4iw_ep, com.kref);
- PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
+ pr_debug("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
if (test_bit(QP_REFERENCED, &ep->com.flags))
deref_qp(ep);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
@@ -467,7 +467,7 @@ static struct net_device *get_real_dev(struct net_device *egress_dev)
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
- pr_err(MOD "ARP failure\n");
+ pr_err("ARP failure\n");
kfree_skb(skb);
}
@@ -528,7 +528,7 @@ static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
struct c4iw_ep *ep = handle;
- pr_err(MOD "ARP failure during accept - tid %u -dropping connection\n",
+ pr_err("ARP failure during accept - tid %u - dropping connection\n",
ep->hwtid);
__state_set(&ep->com, DEAD);
@@ -542,7 +542,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
struct c4iw_ep *ep = handle;
- printk(KERN_ERR MOD "ARP failure during connect\n");
+ pr_err("ARP failure during connect\n");
connect_reply_upcall(ep, -EHOSTUNREACH);
__state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) {
@@ -567,7 +567,7 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
struct c4iw_rdev *rdev = &ep->com.dev->rdev;
struct cpl_abort_req *req = cplhdr(skb);
- PDBG("%s rdev %p\n", __func__, rdev);
+ pr_debug("%s rdev %p\n", __func__, rdev);
req->cmd = CPL_ABORT_NO_RST;
ret = c4iw_ofld_send(rdev, skb);
if (ret) {
@@ -642,7 +642,7 @@ static int send_halfclose(struct c4iw_ep *ep)
struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
if (WARN_ON(!skb))
return -ENOMEM;
@@ -657,7 +657,7 @@ static int send_abort(struct c4iw_ep *ep)
u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
if (WARN_ON(!req_skb))
return -ENOMEM;
@@ -720,12 +720,11 @@ static int send_connect(struct c4iw_ep *ep)
roundup(sizev4, 16) :
roundup(sizev6, 16);
- PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
+ pr_debug("%s ep %p atid %u\n", __func__, ep, ep->atid);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
- __func__);
+ pr_err("%s - failed to alloc skb\n", __func__);
return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
@@ -822,13 +821,13 @@ static int send_connect(struct c4iw_ep *ep)
t5req->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t5req->rsvd = cpu_to_be32(isn);
- PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
+ pr_debug("%s snd_isn %u\n", __func__, t5req->rsvd);
t5req->opt2 = cpu_to_be32(opt2);
} else {
t6req->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t6req->rsvd = cpu_to_be32(isn);
- PDBG("%s snd_isn %u\n", __func__, t6req->rsvd);
+ pr_debug("%s snd_isn %u\n", __func__, t6req->rsvd);
t6req->opt2 = cpu_to_be32(opt2);
}
}
@@ -877,13 +876,13 @@ static int send_connect(struct c4iw_ep *ep)
t5req6->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t5req6->rsvd = cpu_to_be32(isn);
- PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
+ pr_debug("%s snd_isn %u\n", __func__, t5req6->rsvd);
t5req6->opt2 = cpu_to_be32(opt2);
} else {
t6req6->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t6req6->rsvd = cpu_to_be32(isn);
- PDBG("%s snd_isn %u\n", __func__, t6req6->rsvd);
+ pr_debug("%s snd_isn %u\n", __func__, t6req6->rsvd);
t6req6->opt2 = cpu_to_be32(opt2);
}
@@ -907,7 +906,8 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
struct mpa_message *mpa;
struct mpa_v2_conn_params mpa_v2_params;
- PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("%s ep %p tid %u pd_len %d\n",
+ __func__, ep, ep->hwtid, ep->plen);
BUG_ON(skb_cloned(skb));
@@ -961,8 +961,8 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
if (mpa_rev_to_use == 2) {
mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
sizeof (struct mpa_v2_conn_params));
- PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
- ep->ord);
+ pr_debug("%s initiator ird %u ord %u\n", __func__, ep->ird,
+ ep->ord);
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
@@ -1014,7 +1014,8 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("%s ep %p tid %u pd_len %d\n",
+ __func__, ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1023,7 +1024,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
- printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ pr_err("%s - cannot alloc skb!\n", __func__);
return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
@@ -1094,7 +1095,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("%s ep %p tid %u pd_len %d\n",
+ __func__, ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1103,7 +1105,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
- printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ pr_err("%s - cannot alloc skb!\n", __func__);
return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
@@ -1185,8 +1187,8 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_atid(t, atid);
- PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
- be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
+ pr_debug("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+ be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
mutex_lock(&ep->com.mutex);
dst_confirm(ep->dst);
@@ -1229,13 +1231,13 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = status;
if (ep->com.cm_id) {
- PDBG("close complete delivered ep %p cm_id %p tid %u\n",
- ep, ep->com.cm_id, ep->hwtid);
+ pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
deref_cm_id(&ep->com);
set_bit(CLOSE_UPCALL, &ep->com.history);
@@ -1246,12 +1248,12 @@ static void peer_close_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_DISCONNECT;
if (ep->com.cm_id) {
- PDBG("peer close delivered ep %p cm_id %p tid %u\n",
- ep, ep->com.cm_id, ep->hwtid);
+ pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
set_bit(DISCONN_UPCALL, &ep->com.history);
}
@@ -1261,13 +1263,13 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = -ECONNRESET;
if (ep->com.cm_id) {
- PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
- ep->com.cm_id, ep->hwtid);
+ pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
+ ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
deref_cm_id(&ep->com);
set_bit(ABORT_UPCALL, &ep->com.history);
@@ -1278,7 +1280,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
+ pr_debug("%s ep %p tid %u status %d\n",
+ __func__, ep, ep->hwtid, status);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REPLY;
event.status = status;
@@ -1307,8 +1310,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
}
}
- PDBG("%s ep %p tid %u status %d\n", __func__, ep,
- ep->hwtid, status);
+ pr_debug("%s ep %p tid %u status %d\n", __func__, ep,
+ ep->hwtid, status);
set_bit(CONN_RPL_UPCALL, &ep->com.history);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
@@ -1321,7 +1324,7 @@ static int connect_request_upcall(struct c4iw_ep *ep)
struct iw_cm_event event;
int ret;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
memcpy(&event.local_addr, &ep->com.local_addr,
@@ -1358,13 +1361,13 @@ static void established_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
event.ird = ep->ord;
event.ord = ep->ird;
if (ep->com.cm_id) {
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
set_bit(ESTAB_UPCALL, &ep->com.history);
}
@@ -1376,10 +1379,11 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
u32 credit_dack;
- PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
+ pr_debug("%s ep %p tid %u credits %u\n",
+ __func__, ep, ep->hwtid, credits);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
- printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
+ pr_err("update_rx_credits - cannot alloc skb!\n");
return 0;
}
@@ -1427,7 +1431,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
int err;
int disconnect = 0;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
@@ -1454,8 +1458,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
/* Validate MPA header. */
if (mpa->revision > mpa_rev) {
- printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
- " Received = %d\n", __func__, mpa_rev, mpa->revision);
+ pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
+ __func__, mpa_rev, mpa->revision);
err = -EPROTO;
goto err_stop_timer;
}
@@ -1525,8 +1529,9 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
resp_ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
- PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
- __func__, resp_ird, resp_ord, ep->ird, ep->ord);
+ pr_debug("%s responder ird %u ord %u ep ird %u ord %u\n",
+ __func__,
+ resp_ird, resp_ord, ep->ird, ep->ord);
/*
* This is a double-check. Ideally, below checks are
@@ -1570,12 +1575,11 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
- "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
- "%d\n", __func__, ep->mpa_attr.crc_enabled,
- ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
- ep->mpa_attr.p2p_type, p2p_type);
+ pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
+ __func__, ep->mpa_attr.crc_enabled,
+ ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+ ep->mpa_attr.p2p_type, p2p_type);
/*
* If responder's RTR does not match with that of initiator, assign
@@ -1610,7 +1614,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* supports, generate TERM message
*/
if (rtr_mismatch) {
- printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
+ pr_err("%s: RTR mismatch, sending TERM\n", __func__);
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_NOMATCH_RTR;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
@@ -1629,8 +1633,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* initiator ORD.
*/
if (insuff_ird) {
- printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
- __func__);
+ pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_INSUFF_IRD;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
@@ -1669,7 +1672,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
struct mpa_v2_conn_params *mpa_v2_params;
u16 plen;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
@@ -1678,7 +1681,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
goto err_stop_timer;
- PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
/*
* Copy the new data into our accumulation buffer.
@@ -1694,15 +1697,15 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (ep->mpa_pkt_len < sizeof(*mpa))
return 0;
- PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
mpa = (struct mpa_message *) ep->mpa_pkt;
/*
* Validate MPA Header.
*/
if (mpa->revision > mpa_rev) {
- printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
- " Received = %d\n", __func__, mpa_rev, mpa->revision);
+ pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
+ __func__, mpa_rev, mpa->revision);
goto err_stop_timer;
}
@@ -1757,8 +1760,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
ep->ord = min_t(u32, ep->ord,
cur_max_read_depth(ep->com.dev));
- PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
- ep->ord);
+ pr_debug("%s initiator ird %u ord %u\n",
+ __func__, ep->ird, ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
if (peer2peer) {
if (ntohs(mpa_v2_params->ord) &
@@ -1775,11 +1778,11 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
- "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
- ep->mpa_attr.p2p_type);
+ pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
+ __func__,
+ ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+ ep->mpa_attr.p2p_type);
__state_set(&ep->com, MPA_REQ_RCVD);
@@ -1815,7 +1818,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+ pr_debug("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
mutex_lock(&ep->com.mutex);
@@ -1866,10 +1869,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
if (!ep) {
- printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
+ pr_warn("Abort rpl to freed endpoint\n");
return 0;
}
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
@@ -1878,8 +1881,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
release = 1;
break;
default:
- printk(KERN_ERR "%s ep %p state %d\n",
- __func__, ep, ep->com.state);
+ pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
break;
}
mutex_unlock(&ep->com.mutex);
@@ -1995,7 +1997,8 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
ep->snd_win = snd_win;
ep->rcv_win = rcv_win;
- PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
+ pr_debug("%s snd_win %d rcv_win %d\n",
+ __func__, ep->snd_win, ep->rcv_win);
}
#define ACT_OPEN_RETRY_COUNT 2
@@ -2100,7 +2103,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
int iptype;
__u8 *ra;
- PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+ pr_debug("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
init_timer(&ep->timer);
c4iw_init_wr_wait(&ep->com.wr_wait);
@@ -2124,7 +2127,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
*/
ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
if (ep->atid == -1) {
- pr_err("%s - cannot alloc atid.\n", __func__);
+ pr_err("%s - cannot alloc atid\n", __func__);
err = -ENOMEM;
goto fail2;
}
@@ -2151,7 +2154,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
ra = (__u8 *)&raddr6->sin6_addr;
}
if (!ep->dst) {
- pr_err("%s - cannot find route.\n", __func__);
+ pr_err("%s - cannot find route\n", __func__);
err = -EHOSTUNREACH;
goto fail3;
}
@@ -2159,13 +2162,13 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
ep->com.dev->rdev.lldi.adapter_type,
ep->com.cm_id->tos);
if (err) {
- pr_err("%s - cannot alloc l2e.\n", __func__);
+ pr_err("%s - cannot alloc l2e\n", __func__);
goto fail4;
}
- PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
- ep->l2t->idx);
+ pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
state_set(&ep->com, CONNECTING);
ep->tos = ep->com.cm_id->tos;
@@ -2215,12 +2218,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
- PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
- status, status2errno(status));
+ pr_debug("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
+ status, status2errno(status));
if (cxgb_is_neg_adv(status)) {
- PDBG("%s Connection problems for atid %u status %u (%s)\n",
- __func__, atid, status, neg_adv_str(status));
+ pr_debug("%s Connection problems for atid %u status %u (%s)\n",
+ __func__, atid, status, neg_adv_str(status));
ep->stats.connect_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
@@ -2315,11 +2318,11 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
if (!ep) {
- PDBG("%s stid %d lookup failure!\n", __func__, stid);
+ pr_debug("%s stid %d lookup failure!\n", __func__, stid);
goto out;
}
- PDBG("%s ep %p status %d error %d\n", __func__, ep,
- rpl->status, status2errno(rpl->status));
+ pr_debug("%s ep %p status %d error %d\n", __func__, ep,
+ rpl->status, status2errno(rpl->status));
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
out:
@@ -2332,7 +2335,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_TID(rpl);
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
- PDBG("%s ep %p\n", __func__, ep);
+ pr_debug("%s ep %p\n", __func__, ep);
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
return 0;
@@ -2350,7 +2353,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
int win;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
BUG_ON(skb_cloned(skb));
skb_get(skb);
@@ -2421,7 +2424,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
- PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
+ pr_debug("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
}
rpl->opt0 = cpu_to_be64(opt0);
@@ -2434,7 +2437,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
- PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
+ pr_debug("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
BUG_ON(skb_cloned(skb));
skb_trim(skb, sizeof(struct cpl_tid_release));
release_tid(&dev->rdev, hwtid, skb);
@@ -2460,12 +2463,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!parent_ep) {
- PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ pr_debug("%s connect request on invalid stid %d\n",
+ __func__, stid);
goto reject;
}
if (state_read(&parent_ep->com) != LISTEN) {
- PDBG("%s - listening ep not in LISTEN\n", __func__);
+ pr_debug("%s - listening ep not in LISTEN\n", __func__);
goto reject;
}
@@ -2474,18 +2478,18 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
/* Find output route */
if (iptype == 4) {
- PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
- local_ip, peer_ip, ntohs(local_port),
- ntohs(peer_port), peer_mss);
+ pr_debug("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
+ , __func__, parent_ep, hwtid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
*(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port, tos);
} else {
- PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
- local_ip, peer_ip, ntohs(local_port),
- ntohs(peer_port), peer_mss);
+ pr_debug("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
+ , __func__, parent_ep, hwtid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
local_ip, peer_ip, local_port, peer_port,
PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
@@ -2493,15 +2497,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
&parent_ep->com.local_addr)->sin6_scope_id);
}
if (!dst) {
- printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
- __func__);
+ pr_err("%s - failed to find dst entry!\n", __func__);
goto reject;
}
child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
if (!child_ep) {
- printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
- __func__);
+ pr_err("%s - failed to allocate ep entry!\n", __func__);
dst_release(dst);
goto reject;
}
@@ -2509,8 +2511,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
parent_ep->com.dev->rdev.lldi.adapter_type, tos);
if (err) {
- printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
- __func__);
+ pr_err("%s - failed to allocate l2t entry!\n", __func__);
dst_release(dst);
kfree(child_ep);
goto reject;
@@ -2571,8 +2572,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
- child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
+ pr_debug("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
+ child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
@@ -2607,12 +2608,12 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
int ret;
ep = get_ep_from_tid(dev, tid);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
- PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
- ntohs(req->tcp_opt));
+ pr_debug("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+ ntohs(req->tcp_opt));
set_emss(ep, ntohs(req->tcp_opt));
@@ -2644,7 +2645,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
if (!ep)
return 0;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
dst_confirm(ep->dst);
set_bit(PEER_CLOSE, &ep->com.history);
@@ -2666,12 +2667,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
* in rdma connection migration (see c4iw_accept_cr()).
*/
__state_set(&ep->com, CLOSING);
- PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
- PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
break;
case FPDU_MODE:
@@ -2735,17 +2736,17 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
if (cxgb_is_neg_adv(req->status)) {
- PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
+ neg_adv_str(req->status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
mutex_unlock(&dev->rdev.stats.lock);
goto deref_ep;
}
- PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ ep->com.state);
set_bit(PEER_ABORT, &ep->com.history);
/*
@@ -2777,8 +2778,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
* do some housekeeping so as to re-initiate the
* connection
*/
- PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
- mpa_rev);
+ pr_debug("%s: mpa_rev=%d. Retrying with mpav1\n",
+ __func__, mpa_rev);
ep->retry_with_mpa_v1 = 1;
}
break;
@@ -2797,16 +2798,14 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
&attrs, 1);
if (ret)
- printk(KERN_ERR MOD
- "%s - qp <- error failed!\n",
- __func__);
+ pr_err("%s - qp <- error failed!\n", __func__);
}
peer_abort_upcall(ep);
break;
case ABORTING:
break;
case DEAD:
- PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+ pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
mutex_unlock(&ep->com.mutex);
goto deref_ep;
default:
@@ -2870,7 +2869,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (!ep)
return 0;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
BUG_ON(!ep);
/* The cm_id may be null if we failed to connect */
@@ -2918,13 +2917,13 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
BUG_ON(!ep);
if (ep && ep->com.qp) {
- printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
- ep->com.qp->wq.sq.qid);
+ pr_warn("TERM received tid %u qpid %u\n",
+ tid, ep->com.qp->wq.sq.qid);
attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
} else
- printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
+ pr_warn("TERM received tid %u no ep/qp\n", tid);
c4iw_put_ep(&ep->com);
return 0;
@@ -2946,18 +2945,19 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
+ pr_debug("%s ep %p tid %u credits %u\n",
+ __func__, ep, ep->hwtid, credits);
if (credits == 0) {
- PDBG("%s 0 credit ack ep %p tid %u state %u\n",
- __func__, ep, ep->hwtid, state_read(&ep->com));
+ pr_debug("%s 0 credit ack ep %p tid %u state %u\n",
+ __func__, ep, ep->hwtid, state_read(&ep->com));
goto out;
}
dst_confirm(ep->dst);
if (ep->mpa_skb) {
- PDBG("%s last streaming msg ack ep %p tid %u state %u "
- "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
- state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+ pr_debug("%s last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
+ __func__, ep, ep->hwtid,
+ state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
mutex_lock(&ep->com.mutex);
kfree_skb(ep->mpa_skb);
ep->mpa_skb = NULL;
@@ -2975,7 +2975,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
int abort;
struct c4iw_ep *ep = to_ep(cm_id);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) {
@@ -3006,7 +3006,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
int abort = 0;
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) {
@@ -3059,7 +3059,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->ird = 1;
}
- PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+ pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
@@ -3188,7 +3188,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
if (!ep) {
- printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ pr_err("%s - cannot alloc ep\n", __func__);
err = -ENOMEM;
goto out;
}
@@ -3215,20 +3215,20 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.dev = dev;
ep->com.qp = get_qhp(dev, conn_param->qpn);
if (!ep->com.qp) {
- PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
+ pr_debug("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
err = -EINVAL;
goto fail2;
}
ref_qp(ep);
- PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
- ep->com.qp, cm_id);
+ pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
+ ep->com.qp, cm_id);
/*
* Allocate an active TID to initiate a TCP connection.
*/
ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
if (ep->atid == -1) {
- printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+ pr_err("%s - cannot alloc atid\n", __func__);
err = -ENOMEM;
goto fail2;
}
@@ -3258,9 +3258,9 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
/* find a route */
- PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
- __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
- ra, ntohs(raddr->sin_port));
+ pr_debug("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
+ __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
+ ra, ntohs(raddr->sin_port));
ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
laddr->sin_addr.s_addr,
raddr->sin_addr.s_addr,
@@ -3280,10 +3280,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
/* find a route */
- PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
- __func__, laddr6->sin6_addr.s6_addr,
- ntohs(laddr6->sin6_port),
- raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
+ pr_debug("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
+ __func__, laddr6->sin6_addr.s6_addr,
+ ntohs(laddr6->sin6_port),
+ raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
laddr6->sin6_addr.s6_addr,
raddr6->sin6_addr.s6_addr,
@@ -3292,7 +3292,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
raddr6->sin6_scope_id);
}
if (!ep->dst) {
- printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
+ pr_err("%s - cannot find route\n", __func__);
err = -EHOSTUNREACH;
goto fail3;
}
@@ -3300,13 +3300,13 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
if (err) {
- printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
+ pr_err("%s - cannot alloc l2e\n", __func__);
goto fail4;
}
- PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
- ep->l2t->idx);
+ pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
state_set(&ep->com, CONNECTING);
ep->tos = cm_id->tos;
@@ -3414,12 +3414,12 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
if (!ep) {
- printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ pr_err("%s - cannot alloc ep\n", __func__);
err = -ENOMEM;
goto fail1;
}
skb_queue_head_init(&ep->com.ep_skb_list);
- PDBG("%s ep %p\n", __func__, ep);
+ pr_debug("%s ep %p\n", __func__, ep);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
ep->com.dev = dev;
@@ -3439,7 +3439,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->m_local_addr.ss_family, ep);
if (ep->stid == -1) {
- printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
+ pr_err("%s - cannot alloc stid\n", __func__);
err = -ENOMEM;
goto fail2;
}
@@ -3473,7 +3473,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
int err;
struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
- PDBG("%s ep %p\n", __func__, ep);
+ pr_debug("%s ep %p\n", __func__, ep);
might_sleep();
state_set(&ep->com, DEAD);
@@ -3514,8 +3514,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
mutex_lock(&ep->com.mutex);
- PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
- states[ep->com.state], abrupt);
+ pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
+ states[ep->com.state], abrupt);
/*
* Ref the ep here in case we have fatal errors causing the
@@ -3568,8 +3568,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND:
case ABORTING:
case DEAD:
- PDBG("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
+ pr_debug("%s ignoring disconnect ep %p state %u\n",
+ __func__, ep, ep->com.state);
break;
default:
BUG();
@@ -3600,8 +3600,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
C4IW_QP_ATTR_NEXT_STATE,
&attrs, 1);
if (ret)
- pr_err(MOD
- "%s - qp <- error failed!\n",
+ pr_err("%s - qp <- error failed!\n",
__func__);
}
fatal = 1;
@@ -3674,7 +3673,7 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
BUG_ON(!rpl_skb);
if (req->retval) {
- PDBG("%s passive open failure %d\n", __func__, req->retval);
+ pr_debug("%s passive open failure %d\n", __func__, req->retval);
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.pas_ofld_conn_fails++;
mutex_unlock(&dev->rdev.stats.lock);
@@ -3800,6 +3799,8 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
int ret;
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
+ if (!req_skb)
+ return;
req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
@@ -3890,7 +3891,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!lep) {
- PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ pr_debug("%s connect request on invalid stid %d\n",
+ __func__, stid);
goto reject;
}
@@ -3927,9 +3929,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
skb_set_transport_header(skb, (void *)tcph - (void *)rss);
skb_get(skb);
- PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
- ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
- ntohs(tcph->source), iph->tos);
+ pr_debug("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+ ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
+ ntohs(tcph->source), iph->tos);
dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
iph->daddr, iph->saddr, tcph->dest,
@@ -4026,8 +4028,8 @@ static void process_timeout(struct c4iw_ep *ep)
int abort = 1;
mutex_lock(&ep->com.mutex);
- PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
+ ep->com.state);
set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) {
case MPA_REQ_SENT:
@@ -4157,8 +4159,8 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE) {
- printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
- "for tid %u\n", rpl->status, GET_TID(rpl));
+ pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
+ rpl->status, GET_TID(rpl));
}
kfree_skb(skb);
return 0;
@@ -4170,13 +4172,13 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_wr_wait *wr_waitp;
int ret;
- PDBG("%s type %u\n", __func__, rpl->type);
+ pr_debug("%s type %u\n", __func__, rpl->type);
switch (rpl->type) {
case FW6_TYPE_WR_RPL:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
- PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+ pr_debug("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
if (wr_waitp)
c4iw_wake_up(wr_waitp, ret ? -ret : 0);
kfree_skb(skb);
@@ -4186,8 +4188,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
sched(dev, skb);
break;
default:
- printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
- rpl->type);
+ pr_err("%s unexpected fw6 msg type %u\n",
+ __func__, rpl->type);
kfree_skb(skb);
break;
}
@@ -4203,19 +4205,18 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
/* This EP will be dereferenced in peer_abort() */
if (!ep) {
- printk(KERN_WARNING MOD
- "Abort on non-existent endpoint, tid %d\n", tid);
+ pr_warn("Abort on non-existent endpoint, tid %d\n", tid);
kfree_skb(skb);
return 0;
}
if (cxgb_is_neg_adv(req->status)) {
- PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
+ neg_adv_str(req->status));
goto out;
}
- PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ ep->com.state);
c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
out:
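Aside from the mechanical PDBG()-to-pr_debug() conversion, the one behavioral change in cm.c above is the NULL check added after alloc_skb() in send_fw_pass_open_req(); without it, a failed allocation would be dereferenced by __skb_put(). A minimal sketch of that pattern, with hypothetical names:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical request sender illustrating the added allocation check. */
static void send_example_wr(unsigned int wrlen)
{
	struct sk_buff *skb = alloc_skb(wrlen, GFP_KERNEL);
	void *req;

	if (!skb)	/* alloc_skb() returns NULL on failure */
		return;

	req = __skb_put(skb, wrlen);	/* extend the data area by wrlen bytes */
	memset(req, 0, wrlen);
	/* ... fill in and hand off the work request; in real code the
	 * skb is consumed by the send path instead of freed here ... */
	kfree_skb(skb);
}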
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index bec82a600d77..14de5bde1b63 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -146,7 +146,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
ret = c4iw_ofld_send(rdev, skb);
if (ret)
goto err4;
- PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
+ pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
if (ret)
goto err4;
@@ -159,7 +159,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
if (user && !cq->bar2_pa) {
- pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
+ pr_warn("%s: cqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL;
goto err4;
@@ -180,8 +180,8 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
struct t4_cqe cqe;
- PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
- wq, cq, cq->sw_cidx, cq->sw_pidx);
+ pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
CQE_OPCODE_V(FW_RI_SEND) |
@@ -199,8 +199,8 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
int in_use = wq->rq.in_use - count;
BUG_ON(in_use < 0);
- PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
- wq, cq, wq->rq.in_use, count);
+ pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+ wq, cq, wq->rq.in_use, count);
while (in_use--) {
insert_recv_cqe(wq, cq);
flushed++;
@@ -213,8 +213,8 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
{
struct t4_cqe cqe;
- PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
- wq, cq, cq->sw_cidx, cq->sw_pidx);
+ pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
CQE_OPCODE_V(swcqe->opcode) |
@@ -283,8 +283,8 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
/*
* Insert this completed cqe into the swcq.
*/
- PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
- __func__, cidx, cq->sw_pidx);
+ pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
+ __func__, cidx, cq->sw_pidx);
swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
t4_swcq_produce(cq);
@@ -339,7 +339,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
struct t4_swsqe *swsqe;
int ret;
- PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
+ pr_debug("%s cqid 0x%x\n", __func__, chp->cq.cqid);
ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
/*
@@ -432,7 +432,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
u32 ptr;
*count = 0;
- PDBG("%s count zero %d\n", __func__, *count);
+ pr_debug("%s count zero %d\n", __func__, *count);
ptr = cq->sw_cidx;
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
@@ -442,7 +442,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
if (++ptr == cq->size)
ptr = 0;
}
- PDBG("%s cq %p count %d\n", __func__, cq, *count);
+ pr_debug("%s cq %p count %d\n", __func__, cq, *count);
}
/*
@@ -473,12 +473,11 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (ret)
return ret;
- PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
- " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
- CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
- CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
- CQE_WRID_LOW(hw_cqe));
+ pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+ __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+ CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
+ CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
+ CQE_WRID_LOW(hw_cqe));
/*
* skip cqe's not affiliated with a QP.
@@ -606,8 +605,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
struct t4_swsqe *swsqe;
- PDBG("%s out of order completion going in sw_sq at idx %u\n",
- __func__, CQE_WRID_SQ_IDX(hw_cqe));
+ pr_debug("%s out of order completion going in sw_sq at idx %u\n",
+ __func__, CQE_WRID_SQ_IDX(hw_cqe));
swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
swsqe->cqe = *hw_cqe;
swsqe->complete = 1;
@@ -641,13 +640,13 @@ proc_cqe:
BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
wq->sq.cidx = (uint16_t)idx;
- PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+ pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
if (c4iw_wr_log)
c4iw_log_wr_stats(wq, hw_cqe);
t4_sq_consume(wq);
} else {
- PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+ pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
BUG_ON(t4_rq_empty(wq));
if (c4iw_wr_log)
@@ -664,12 +663,12 @@ flush_wq:
skip_cqe:
if (SW_CQE(hw_cqe)) {
- PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
- __func__, cq, cq->cqid, cq->sw_cidx);
+ pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
+ __func__, cq, cq->cqid, cq->sw_cidx);
t4_swcq_consume(cq);
} else {
- PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
- __func__, cq, cq->cqid, cq->cidx);
+ pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
+ __func__, cq, cq->cqid, cq->cidx);
t4_hwcq_consume(cq);
}
return ret;
@@ -715,10 +714,12 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->vendor_err = CQE_STATUS(&cqe);
wc->wc_flags = 0;
- PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
- "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
- CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
- CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
+ pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
+ __func__, CQE_QPID(&cqe),
+ CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
+ CQE_STATUS(&cqe), CQE_LEN(&cqe),
+ CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
+ (unsigned long long)cookie);
if (CQE_TYPE(&cqe) == 0) {
if (!CQE_STATUS(&cqe))
@@ -766,8 +767,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->opcode = IB_WC_SEND;
break;
default:
- printk(KERN_ERR MOD "Unexpected opcode %d "
- "in the CQE received for QPID=0x%0x\n",
+ pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
CQE_OPCODE(&cqe), CQE_QPID(&cqe));
ret = -EINVAL;
goto out;
@@ -822,8 +822,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->status = IB_WC_WR_FLUSH_ERR;
break;
default:
- printk(KERN_ERR MOD
- "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
+ pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
CQE_STATUS(&cqe), CQE_QPID(&cqe));
wc->status = IB_WC_FATAL_ERR;
}
@@ -860,7 +859,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
struct c4iw_cq *chp;
struct c4iw_ucontext *ucontext;
- PDBG("%s ib_cq %p\n", __func__, ib_cq);
+ pr_debug("%s ib_cq %p\n", __func__, ib_cq);
chp = to_c4iw_cq(ib_cq);
remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -892,7 +891,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
size_t memsize, hwentries;
struct c4iw_mm_entry *mm, *mm2;
- PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+ pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
if (attr->flags)
return ERR_PTR(-EINVAL);
@@ -998,9 +997,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
mm2->len = PAGE_SIZE;
insert_mmap(ucontext, mm2);
}
- PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
- __func__, chp->cq.cqid, chp, chp->cq.size,
- chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
+ pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+ __func__, chp->cq.cqid, chp, chp->cq.size,
+ chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
return &chp->ibcq;
err6:
kfree(mm2);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 4e4f1a732b01..329fb65e8fb0 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -334,7 +334,7 @@ static int qp_release(struct inode *inode, struct file *file)
{
struct c4iw_debugfs_data *qpd = file->private_data;
if (!qpd) {
- printk(KERN_INFO "%s null qpd?\n", __func__);
+ pr_info("%s null qpd?\n", __func__);
return 0;
}
vfree(qpd->buf);
@@ -422,7 +422,7 @@ static int stag_release(struct inode *inode, struct file *file)
{
struct c4iw_debugfs_data *stagd = file->private_data;
if (!stagd) {
- printk(KERN_INFO "%s null stagd?\n", __func__);
+ pr_info("%s null stagd?\n", __func__);
return 0;
}
vfree(stagd->buf);
@@ -796,15 +796,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
* cqid and qpid range must match for now.
*/
if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
- pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+ pr_err("%s: unsupported udb/ucq densities %u/%u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
rdev->lldi.ucq_density);
return -EINVAL;
}
if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
- pr_err(MOD "%s: unsupported qp and cq id ranges "
- "qp start %u size %u cq start %u size %u\n",
+ pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
rdev->lldi.vr->cq.size);
@@ -813,23 +812,20 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->qpmask = rdev->lldi.udb_density - 1;
rdev->cqmask = rdev->lldi.ucq_density - 1;
- PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
- "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
- "qp qid start %u size %u cq qid start %u size %u\n",
- __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
- rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
- rdev->lldi.vr->pbl.start,
- rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
- rdev->lldi.vr->rq.size,
- rdev->lldi.vr->qp.start,
- rdev->lldi.vr->qp.size,
- rdev->lldi.vr->cq.start,
- rdev->lldi.vr->cq.size);
- PDBG("udb %pR db_reg %p gts_reg %p "
- "qpmask 0x%x cqmask 0x%x\n",
- &rdev->lldi.pdev->resource[2],
- rdev->lldi.db_reg, rdev->lldi.gts_reg,
- rdev->qpmask, rdev->cqmask);
+ pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
+ __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+ rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
+ rdev->lldi.vr->pbl.start,
+ rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
+ rdev->lldi.vr->rq.size,
+ rdev->lldi.vr->qp.start,
+ rdev->lldi.vr->qp.size,
+ rdev->lldi.vr->cq.start,
+ rdev->lldi.vr->cq.size);
+ pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
+ &rdev->lldi.pdev->resource[2],
+ rdev->lldi.db_reg, rdev->lldi.gts_reg,
+ rdev->qpmask, rdev->cqmask);
if (c4iw_num_stags(rdev) == 0)
return -EINVAL;
@@ -843,22 +839,22 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
if (err) {
- printk(KERN_ERR MOD "error %d initializing resources\n", err);
+ pr_err("error %d initializing resources\n", err);
return err;
}
err = c4iw_pblpool_create(rdev);
if (err) {
- printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
+ pr_err("error %d initializing pbl pool\n", err);
goto destroy_resource;
}
err = c4iw_rqtpool_create(rdev);
if (err) {
- printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
+ pr_err("error %d initializing rqt pool\n", err);
goto destroy_pblpool;
}
err = c4iw_ocqp_pool_create(rdev);
if (err) {
- printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
+ pr_err("error %d initializing ocqp pool\n", err);
goto destroy_rqtpool;
}
rdev->status_page = (struct t4_dev_status_page *)
@@ -936,7 +932,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
static void c4iw_remove(struct uld_ctx *ctx)
{
- PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+ pr_debug("%s c4iw_dev %p\n", __func__, ctx->dev);
c4iw_unregister_device(ctx->dev);
c4iw_dealloc(ctx);
}
@@ -954,25 +950,25 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
int ret;
if (!rdma_supported(infop)) {
- printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
- pci_name(infop->pdev));
+ pr_info("%s: RDMA not supported on this device\n",
+ pci_name(infop->pdev));
return ERR_PTR(-ENOSYS);
}
if (!ocqp_supported(infop))
- pr_info("%s: On-Chip Queues not supported on this device.\n",
+ pr_info("%s: On-Chip Queues not supported on this device\n",
pci_name(infop->pdev));
devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
if (!devp) {
- printk(KERN_ERR MOD "Cannot allocate ib device\n");
+ pr_err("Cannot allocate ib device\n");
return ERR_PTR(-ENOMEM);
}
devp->rdev.lldi = *infop;
/* init various hw-queue params based on lld info */
- PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
- __func__, devp->rdev.lldi.sge_ingpadboundary,
- devp->rdev.lldi.sge_egrstatuspagesize);
+ pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+ __func__, devp->rdev.lldi.sge_ingpadboundary,
+ devp->rdev.lldi.sge_egrstatuspagesize);
devp->rdev.hw_queue.t4_eq_status_entries =
devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
@@ -1000,7 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
pci_resource_len(devp->rdev.lldi.pdev, 2));
if (!devp->rdev.bar2_kva) {
- pr_err(MOD "Unable to ioremap BAR2\n");
+ pr_err("Unable to ioremap BAR2\n");
ib_dealloc_device(&devp->ibdev);
return ERR_PTR(-EINVAL);
}
@@ -1012,20 +1008,19 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
devp->rdev.lldi.vr->ocq.size);
if (!devp->rdev.oc_mw_kva) {
- pr_err(MOD "Unable to ioremap onchip mem\n");
+ pr_err("Unable to ioremap onchip mem\n");
ib_dealloc_device(&devp->ibdev);
return ERR_PTR(-EINVAL);
}
}
- PDBG(KERN_INFO MOD "ocq memory: "
- "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
- devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
- devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
+ pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
+ devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
+ devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
ret = c4iw_rdev_open(&devp->rdev);
if (ret) {
- printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
+ pr_err("Unable to open CXIO rdev err %d\n", ret);
ib_dealloc_device(&devp->ibdev);
return ERR_PTR(ret);
}
@@ -1071,17 +1066,17 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
}
ctx->lldi = *infop;
- PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
- __func__, pci_name(ctx->lldi.pdev),
- ctx->lldi.nchan, ctx->lldi.nrxq,
- ctx->lldi.ntxq, ctx->lldi.nports);
+ pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+ __func__, pci_name(ctx->lldi.pdev),
+ ctx->lldi.nchan, ctx->lldi.nrxq,
+ ctx->lldi.ntxq, ctx->lldi.nports);
mutex_lock(&dev_mutex);
list_add_tail(&ctx->entry, &uld_ctx_list);
mutex_unlock(&dev_mutex);
for (i = 0; i < ctx->lldi.nrxq; i++)
- PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
+ pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
return ctx;
}
@@ -1138,8 +1133,7 @@ static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
goto out;
if (c4iw_handlers[opcode] == NULL) {
- pr_info("%s no handler opcode 0x%x...\n", __func__,
- opcode);
+ pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
kfree_skb(skb);
goto out;
}
@@ -1176,13 +1170,11 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
if (recv_rx_pkt(dev, gl, rsp))
return 0;
- pr_info("%s: unexpected FL contents at %p, " \
- "RSS %#llx, FL %#llx, len %u\n",
- pci_name(ctx->lldi.pdev), gl->va,
- (unsigned long long)be64_to_cpu(*rsp),
- (unsigned long long)be64_to_cpu(
- *(__force __be64 *)gl->va),
- gl->tot_len);
+ pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
+ pci_name(ctx->lldi.pdev), gl->va,
+ be64_to_cpu(*rsp),
+ be64_to_cpu(*(__force __be64 *)gl->va),
+ gl->tot_len);
return 0;
} else {
@@ -1195,8 +1187,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
if (c4iw_handlers[opcode]) {
c4iw_handlers[opcode](dev, skb);
} else {
- pr_info("%s no handler opcode 0x%x...\n", __func__,
- opcode);
+ pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
kfree_skb(skb);
}
@@ -1209,17 +1200,16 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct uld_ctx *ctx = handle;
- PDBG("%s new_state %u\n", __func__, new_state);
+ pr_debug("%s new_state %u\n", __func__, new_state);
switch (new_state) {
case CXGB4_STATE_UP:
- printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+ pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
if (!ctx->dev) {
int ret;
ctx->dev = c4iw_alloc(&ctx->lldi);
if (IS_ERR(ctx->dev)) {
- printk(KERN_ERR MOD
- "%s: initialization failed: %ld\n",
+ pr_err("%s: initialization failed: %ld\n",
pci_name(ctx->lldi.pdev),
PTR_ERR(ctx->dev));
ctx->dev = NULL;
@@ -1227,22 +1217,19 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
}
ret = c4iw_register_device(ctx->dev);
if (ret) {
- printk(KERN_ERR MOD
- "%s: RDMA registration failed: %d\n",
+ pr_err("%s: RDMA registration failed: %d\n",
pci_name(ctx->lldi.pdev), ret);
c4iw_dealloc(ctx);
}
}
break;
case CXGB4_STATE_DOWN:
- printk(KERN_INFO MOD "%s: Down\n",
- pci_name(ctx->lldi.pdev));
+ pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
if (ctx->dev)
c4iw_remove(ctx);
break;
case CXGB4_STATE_START_RECOVERY:
- printk(KERN_INFO MOD "%s: Fatal Error\n",
- pci_name(ctx->lldi.pdev));
+ pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
if (ctx->dev) {
struct ib_event event;
@@ -1255,8 +1242,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
}
break;
case CXGB4_STATE_DETACH:
- printk(KERN_INFO MOD "%s: Detach\n",
- pci_name(ctx->lldi.pdev));
+ pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
if (ctx->dev)
c4iw_remove(ctx);
break;
@@ -1406,9 +1392,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
t4_sq_host_wq_pidx(&qp->wq),
t4_sq_wq_size(&qp->wq));
if (ret) {
- pr_err(MOD "%s: Fatal error - "
- "DB overflow recovery failed - "
- "error syncing SQ qid %u\n",
+ pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
spin_unlock(&qp->lock);
spin_unlock_irq(&qp->rhp->lock);
@@ -1422,9 +1406,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
t4_rq_wq_size(&qp->wq));
if (ret) {
- pr_err(MOD "%s: Fatal error - "
- "DB overflow recovery failed - "
- "error syncing RQ qid %u\n",
+ pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
spin_unlock(&qp->lock);
spin_unlock_irq(&qp->rhp->lock);
@@ -1455,7 +1437,7 @@ static void recover_queues(struct uld_ctx *ctx)
/* flush the SGE contexts */
ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
if (ret) {
- printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+ pr_err("%s: Fatal error - DB overflow recovery failed\n",
pci_name(ctx->lldi.pdev));
return;
}
@@ -1513,8 +1495,8 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
mutex_unlock(&ctx->dev->rdev.stats.lock);
break;
default:
- printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
- pci_name(ctx->lldi.pdev), control);
+ pr_warn("%s: unknown control cmd %u\n",
+ pci_name(ctx->lldi.pdev), control);
break;
}
return 0;
@@ -1543,8 +1525,7 @@ static int __init c4iw_init_module(void)
c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
if (!c4iw_debugfs_root)
- printk(KERN_WARNING MOD
- "could not create debugfs entry, continuing\n");
+ pr_warn("could not create debugfs entry, continuing\n");
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
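Every call site in device.c drops the explicit MOD ("iw_cxgb4:") prefix. That is safe because iw_cxgb4.h, further down in this diff, now defines a pr_fmt hook that the pr_* macros splice into the format string at preprocessing time. Roughly, assuming KBUILD_MODNAME expands to "iw_cxgb4":

	pr_err("error %d initializing resources\n", err);
	/* preprocesses to approximately: */
	printk(KERN_ERR "iw_cxgb4" ": " "error %d initializing resources\n", err);
	/* the compiler folds the adjacent literals into "iw_cxgb4: error %d ...",
	 * the same prefix the old printk(KERN_ERR MOD ...) spelled by hand */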
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index bdfac2ccb704..8f963df0bffc 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -47,17 +47,16 @@ static void print_tpte(struct c4iw_dev *dev, u32 stag)
"%s cxgb4_read_tpte err %d\n", __func__, ret);
return;
}
- PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
- "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
- stag & 0xffffff00,
- FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
- FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
- FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
- FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
- FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
- FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
- ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
- ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
+ pr_debug("stag idx 0x%x valid %d key 0x%x state %d pdid %d perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+ stag & 0xffffff00,
+ FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
+ FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
+ FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
+ ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+ ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}
static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
@@ -71,9 +70,9 @@ static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
- PDBG("%016llx %016llx %016llx %016llx\n",
- be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
- be64_to_cpu(p[3]));
+ pr_debug("%016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
+ be64_to_cpu(p[3]));
/*
* Ingress WRITE and READ_RESP errors provide
@@ -124,8 +123,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
spin_lock_irq(&dev->lock);
qhp = get_qhp(dev, CQE_QPID(err_cqe));
if (!qhp) {
- printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
- "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
CQE_QPID(err_cqe),
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
@@ -140,8 +138,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
cqid = qhp->attr.rcq;
chp = get_chp(dev, cqid);
if (!chp) {
- printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
- "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
cqid, CQE_QPID(err_cqe),
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
@@ -165,7 +162,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
/* Completion Events */
case T4_ERR_SUCCESS:
- printk(KERN_ERR MOD "AE with status 0!\n");
+ pr_err("AE with status 0!\n");
break;
case T4_ERR_STAG:
@@ -207,7 +204,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
break;
default:
- printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
+ pr_err("Unknown T4 status 0x%x QPID 0x%x\n",
CQE_STATUS(err_cqe), qhp->wq.sq.qid);
post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
break;
@@ -237,7 +234,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
} else {
- PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+ pr_debug("%s unknown cqid 0x%x\n", __func__, qid);
spin_unlock_irqrestore(&dev->lock, flag);
}
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 5846c47c8d55..819a30635d53 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -64,12 +64,11 @@
#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"
-extern int c4iw_debug;
-#define PDBG(fmt, args...) \
-do { \
- if (c4iw_debug) \
- printk(MOD fmt, ## args); \
-} while (0)
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "t4.h"
@@ -231,15 +230,15 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
if (!ret) {
- PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
- func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+ pr_debug("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+ func, pci_name(rdev->lldi.pdev), hwtid, qpid);
rdev->flags |= T4_FATAL_ERROR;
wr_waitp->ret = -EIO;
}
out:
if (wr_waitp->ret)
- PDBG("%s: FW reply %d tid %u qpid %u\n",
- pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+ pr_debug("%s: FW reply %d tid %u qpid %u\n",
+ pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
return wr_waitp->ret;
}
@@ -538,8 +537,9 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
if (mm->key == key && mm->len == len) {
list_del_init(&mm->entry);
spin_unlock(&ucontext->mmap_lock);
- PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
- key, (unsigned long long) mm->addr, mm->len);
+ pr_debug("%s key 0x%x addr 0x%llx len %d\n",
+ __func__, key,
+ (unsigned long long)mm->addr, mm->len);
return mm;
}
}
@@ -551,8 +551,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,
struct c4iw_mm_entry *mm)
{
spin_lock(&ucontext->mmap_lock);
- PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
- mm->key, (unsigned long long) mm->addr, mm->len);
+ pr_debug("%s key 0x%x addr 0x%llx len %d\n",
+ __func__, mm->key, (unsigned long long)mm->addr, mm->len);
list_add_tail(&mm->entry, &ucontext->mmaps);
spin_unlock(&ucontext->mmap_lock);
}
@@ -670,17 +670,19 @@ enum c4iw_mmid_state {
#define MPA_V2_RDMA_READ_RTR 0x4000
#define MPA_V2_IRD_ORD_MASK 0x3FFF
-#define c4iw_put_ep(ep) { \
- PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
- ep, kref_read(&((ep)->kref))); \
- WARN_ON(kref_read(&((ep)->kref)) < 1); \
- kref_put(&((ep)->kref), _c4iw_free_ep); \
+#define c4iw_put_ep(ep) { \
+ pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
+ __func__, __LINE__, \
+ ep, kref_read(&((ep)->kref))); \
+ WARN_ON(kref_read(&((ep)->kref)) < 1); \
+ kref_put(&((ep)->kref), _c4iw_free_ep); \
}
-#define c4iw_get_ep(ep) { \
- PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
- ep, kref_read(&((ep)->kref))); \
- kref_get(&((ep)->kref)); \
+#define c4iw_get_ep(ep) { \
+ pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
+ __func__, __LINE__, \
+ ep, kref_read(&((ep)->kref))); \
+ kref_get(&((ep)->kref)); \
}
void _c4iw_free_ep(struct kref *kref);
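One behavioral note on the PDBG removal above: the old macro printed whenever the c4iw_debug module parameter was set at runtime, whereas pr_debug() compiles to a no-op unless the file is built with DEBUG or CONFIG_DYNAMIC_DEBUG enables the call site. The string-pasting mechanics of the new pr_fmt hook can be seen in a small userspace approximation (plain C, not kernel code; pr_debug is simplified to an unconditional printf here):

#include <stdio.h>

/* stand-ins for the kernel's build-time module name and pr_fmt hook */
#define KBUILD_MODNAME "iw_cxgb4"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* simplified: the real pr_debug is gated by DEBUG / dynamic debug */
#define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints: iw_cxgb4: c4iw_ev_handler unknown cqid 0x10 */
	pr_debug("%s unknown cqid 0x%x\n", "c4iw_ev_handler", 0x10);
	return 0;
}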
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 410408f886c1..3ee7f43e419a 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -38,9 +38,9 @@
#include "iw_cxgb4.h"
-int use_dsgl = 0;
+int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
-MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=0)");
+MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1) (DEPRECATED)");
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
@@ -125,7 +125,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
addr &= 0x7FFFFFF;
- PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
+ pr_debug("%s addr 0x%x len %u\n", __func__, addr, len);
num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
c4iw_init_wr_wait(&wr_wait);
for (i = 0; i < num_wqe; i++) {
@@ -231,13 +231,11 @@ out:
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
void *data, struct sk_buff *skb)
{
- if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
+ if (rdev->lldi.ulptx_memwrite_dsgl && use_dsgl) {
if (len > inline_threshold) {
if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
- printk_ratelimited(KERN_WARNING
- "%s: dma map"
- " failure (non fatal)\n",
- pci_name(rdev->lldi.pdev));
+ pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
+ pci_name(rdev->lldi.pdev));
return _c4iw_write_mem_inline(rdev, addr, len,
data, skb);
} else {
@@ -289,8 +287,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
mutex_unlock(&rdev->stats.lock);
*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
}
- PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
+ pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ __func__, stag_state, type, pdid, stag_idx);
/* write TPT entry */
if (reset_tpt_entry)
@@ -331,9 +329,9 @@ static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
{
int err;
- PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, pbl_addr, rdev->lldi.vr->pbl.start,
- pbl_size);
+ pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+ __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+ pbl_size);
err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
return err;
@@ -376,7 +374,7 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
mhp->attr.stag = stag;
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+ pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
@@ -426,7 +424,7 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
int ret;
u32 stag = T4_STAG_UNSET;
- PDBG("%s ib_pd %p\n", __func__, pd);
+ pr_debug("%s ib_pd %p\n", __func__, pd);
php = to_c4iw_pd(pd);
rhp = php->rhp;
@@ -483,7 +481,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct c4iw_pd *php;
struct c4iw_mr *mhp;
- PDBG("%s ib_pd %p\n", __func__, pd);
+ pr_debug("%s ib_pd %p\n", __func__, pd);
if (length == ~0ULL)
return ERR_PTR(-EINVAL);
@@ -517,7 +515,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(err);
}
- shift = ffs(mhp->umem->page_size) - 1;
+ shift = mhp->umem->page_shift;
n = mhp->umem->nmap;
err = alloc_pbl(mhp, n);
@@ -536,7 +534,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
len = sg_dma_len(sg) >> shift;
for (k = 0; k < len; ++k) {
pages[i++] = cpu_to_be64(sg_dma_address(sg) +
- mhp->umem->page_size * k);
+ (k << shift));
if (i == PAGE_SIZE / sizeof *pages) {
err = write_pbl(&mhp->rhp->rdev,
pages,
@@ -620,7 +618,7 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
ret = -ENOMEM;
goto dealloc_win;
}
- PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmw);
dealloc_win:
@@ -645,7 +643,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
kfree_skb(mhp->dereg_skb);
kfree(mhp);
- PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
return 0;
}
@@ -703,7 +701,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
goto err3;
}
- PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmr);
err3:
dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
@@ -748,7 +746,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
struct c4iw_mr *mhp;
u32 mmid;
- PDBG("%s ib_mr %p\n", __func__, ib_mr);
+ pr_debug("%s ib_mr %p\n", __func__, ib_mr);
mhp = to_c4iw_mr(ib_mr);
rhp = mhp->rhp;
@@ -766,7 +764,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
kfree((void *) (unsigned long) mhp->kva);
if (mhp->umem)
ib_umem_release(mhp->umem);
- PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+ pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
kfree(mhp);
return 0;
}
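Besides the logging changes, the mem.c hunks switch c4iw_reg_user_mr() from umem->page_size to umem->page_shift, replacing the multiply with a shift, and gate the DSGL write path on the ulptx_memwrite_dsgl capability reported by the LLD instead of an is_t5() chip check. The multiply and the shift are interchangeable because the page size is a power of two; a standalone check of the identity, with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int shift = 12;              /* 4 KiB pages */
	uint64_t page_size = 1ULL << shift;
	uint64_t dma_base  = 0xd0000000ULL;   /* arbitrary sg_dma_address() */

	/* page_size * k and k << shift address the same page */
	for (uint64_t k = 0; k < 16; k++)
		assert(dma_base + page_size * k == dma_base + (k << shift));
	return 0;
}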
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index df64417ab6f2..0771e9a4d061 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -59,7 +59,7 @@ module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr,
+ struct rdma_ah_attr *ah_attr,
struct ib_udata *udata)
{
@@ -102,7 +102,7 @@ void _c4iw_free_ucontext(struct kref *kref)
ucontext = container_of(kref, struct c4iw_ucontext, kref);
rhp = to_c4iw_dev(ucontext->ibucontext.device);
- PDBG("%s ucontext %p\n", __func__, ucontext);
+ pr_debug("%s ucontext %p\n", __func__, ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -113,7 +113,7 @@ static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
- PDBG("%s context %p\n", __func__, context);
+ pr_debug("%s context %p\n", __func__, context);
c4iw_put_ucontext(ucontext);
return 0;
}
@@ -123,12 +123,11 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
{
struct c4iw_ucontext *context;
struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
- static int warned;
struct c4iw_alloc_ucontext_resp uresp;
int ret = 0;
struct c4iw_mm_entry *mm = NULL;
- PDBG("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("%s ibdev %p\n", __func__, ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
ret = -ENOMEM;
@@ -141,8 +140,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
kref_init(&context->kref);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
- if (!warned++)
- pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
+ pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
} else {
mm = kmalloc(sizeof(*mm), GFP_KERNEL);
@@ -187,8 +185,8 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
struct c4iw_ucontext *ucontext;
u64 addr;
- PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
- key, len);
+ pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+ key, len);
if (vma->vm_start & (PAGE_SIZE-1))
return -EINVAL;
@@ -253,7 +251,7 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
php = to_c4iw_pd(pd);
rhp = php->rhp;
- PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+ pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur--;
@@ -270,7 +268,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
u32 pdid;
struct c4iw_dev *rhp;
- PDBG("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("%s ibdev %p\n", __func__, ibdev);
rhp = (struct c4iw_dev *) ibdev;
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
if (!pdid)
@@ -293,14 +291,14 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
mutex_unlock(&rhp->rdev.stats.lock);
- PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+ pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
return &php->ibpd;
}
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
- PDBG("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("%s ibdev %p\n", __func__, ibdev);
*pkey = 0;
return 0;
}
@@ -310,8 +308,8 @@ static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
{
struct c4iw_dev *dev;
- PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
- __func__, ibdev, port, index, gid);
+ pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
+ __func__, ibdev, port, index, gid);
dev = to_c4iw_dev(ibdev);
BUG_ON(port == 0);
memset(&(gid->raw[0]), 0, sizeof(gid->raw));
@@ -325,7 +323,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
struct c4iw_dev *dev;
- PDBG("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("%s ibdev %p\n", __func__, ibdev);
if (uhw->inlen || uhw->outlen)
return -EINVAL;
@@ -366,7 +364,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct net_device *netdev;
struct in_device *inetdev;
- PDBG("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("%s ibdev %p\n", __func__, ibdev);
dev = to_c4iw_dev(ibdev);
netdev = dev->rdev.lldi.ports[port-1];
@@ -408,7 +406,7 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
- PDBG("%s dev 0x%p\n", __func__, dev);
+ pr_debug("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%d\n",
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
@@ -421,7 +419,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
struct ethtool_drvinfo info;
struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
- PDBG("%s dev 0x%p\n", __func__, dev);
+ pr_debug("%s dev 0x%p\n", __func__, dev);
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
@@ -431,7 +429,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
- PDBG("%s dev 0x%p\n", __func__, dev);
+ pr_debug("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
c4iw_dev->rdev.lldi.pdev->device);
}
@@ -524,7 +522,7 @@ static void get_dev_fw_str(struct ib_device *dev, char *str,
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev);
- PDBG("%s dev 0x%p\n", __func__, dev);
+ pr_debug("%s dev 0x%p\n", __func__, dev);
snprintf(str, str_len, "%u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
@@ -538,7 +536,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
int ret;
int i;
- PDBG("%s c4iw_dev %p\n", __func__, dev);
+ pr_debug("%s c4iw_dev %p\n", __func__, dev);
BUG_ON(!dev->rdev.lldi.ports[0]);
strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
@@ -648,7 +646,7 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
{
int i;
- PDBG("%s c4iw_dev %p\n", __func__, dev);
+ pr_debug("%s c4iw_dev %p\n", __func__, dev);
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
device_remove_file(&dev->ibdev.dev,
c4iw_class_attributes[i]);
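The c4iw_alloc_ucontext() hunk replaces a hand-rolled static warned latch with pr_err_once(). A userspace sketch of the idiom, simplified from the kernel's printk_once machinery (the real macro lives in the printk headers):

#include <stdio.h>

#define pr_err_once(fmt, ...)                           \
do {                                                    \
	static int __once_done;                         \
	if (!__once_done) {                             \
		__once_done = 1;                        \
		fprintf(stderr, fmt, ##__VA_ARGS__);    \
	}                                               \
} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)   /* message appears exactly once */
		pr_err_once("Warning - downlevel libcxgb4 (non-fatal)\n");
	return 0;
}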
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d4fd2f5c8326..8e4154b4253e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -254,11 +254,11 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
ret = -ENOMEM;
goto free_sq;
}
- PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
- __func__, wq->sq.queue,
- (unsigned long long)virt_to_phys(wq->sq.queue),
- wq->rq.queue,
- (unsigned long long)virt_to_phys(wq->rq.queue));
+ pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+ __func__, wq->sq.queue,
+ (unsigned long long)virt_to_phys(wq->sq.queue),
+ wq->rq.queue,
+ (unsigned long long)virt_to_phys(wq->rq.queue));
memset(wq->rq.queue, 0, wq->rq.memsize);
dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
* User mode must have bar2 access.
*/
if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
- pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
+ pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
}
@@ -362,9 +362,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (ret)
goto free_dma;
- PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
- __func__, wq->sq.qid, wq->rq.qid, wq->db,
- wq->sq.bar2_va, wq->rq.bar2_va);
+ pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
+ __func__, wq->sq.qid, wq->rq.qid, wq->db,
+ wq->sq.bar2_va, wq->rq.bar2_va);
return 0;
free_dma:
@@ -725,7 +725,7 @@ static void free_qp_work(struct work_struct *work)
ucontext = qhp->ucontext;
rhp = qhp->rhp;
- PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+ pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
@@ -739,19 +739,19 @@ static void queue_qp_free(struct kref *kref)
struct c4iw_qp *qhp;
qhp = container_of(kref, struct c4iw_qp, kref);
- PDBG("%s qhp %p\n", __func__, qhp);
+ pr_debug("%s qhp %p\n", __func__, qhp);
queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}
void c4iw_qp_add_ref(struct ib_qp *qp)
{
- PDBG("%s ib_qp %p\n", __func__, qp);
+ pr_debug("%s ib_qp %p\n", __func__, qp);
kref_get(&to_c4iw_qp(qp)->kref);
}
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
- PDBG("%s ib_qp %p\n", __func__, qp);
+ pr_debug("%s ib_qp %p\n", __func__, qp);
kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}
@@ -959,8 +959,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
break;
default:
- PDBG("%s post of type=%d TBD!\n", __func__,
- wr->opcode);
+ pr_debug("%s post of type=%d TBD!\n", __func__,
+ wr->opcode);
err = -EINVAL;
}
if (err) {
@@ -981,9 +981,10 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
- PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
- __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
- swsqe->opcode, swsqe->read_len);
+ pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
+ __func__,
+ (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
+ swsqe->opcode, swsqe->read_len);
wr = wr->next;
num_wrs--;
t4_sq_produce(&qhp->wq, len16);
@@ -1057,8 +1058,9 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wqe->recv.r2[1] = 0;
wqe->recv.r2[2] = 0;
wqe->recv.len16 = len16;
- PDBG("%s cookie 0x%llx pidx %u\n", __func__,
- (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
+ pr_debug("%s cookie 0x%llx pidx %u\n",
+ __func__,
+ (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
t4_rq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
wr = wr->next;
@@ -1217,8 +1219,8 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
struct sk_buff *skb;
struct terminate_message *term;
- PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
- qhp->ep->hwtid);
+ pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
if (WARN_ON(!skb))
@@ -1254,7 +1256,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
int rq_flushed, sq_flushed;
unsigned long flag;
- PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+ pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&rchp->lock, flag);
@@ -1339,8 +1341,8 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
int ret;
struct sk_buff *skb;
- PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
- ep->hwtid);
+ pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ ep->hwtid);
skb = skb_dequeue(&ep->com.ep_skb_list);
if (WARN_ON(!skb))
@@ -1366,13 +1368,13 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
qhp->wq.sq.qid, __func__);
out:
- PDBG("%s ret %d\n", __func__, ret);
+ pr_debug("%s ret %d\n", __func__, ret);
return ret;
}
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
- PDBG("%s p2p_type = %d\n", __func__, p2p_type);
+ pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
memset(&init->u, 0, sizeof init->u);
switch (p2p_type) {
case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
@@ -1401,8 +1403,8 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
int ret;
struct sk_buff *skb;
- PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
- qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
+ pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+ qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
if (!skb) {
@@ -1474,7 +1476,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
err1:
free_ird(rhp, qhp->attr.max_ird);
out:
- PDBG("%s ret %d\n", __func__, ret);
+ pr_debug("%s ret %d\n", __func__, ret);
return ret;
}
@@ -1491,9 +1493,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
int free = 0;
struct c4iw_ep *ep = NULL;
- PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
- qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
- (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
+ pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
+ __func__,
+ qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
+ (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
mutex_lock(&qhp->mutex);
@@ -1671,16 +1674,15 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
goto err;
break;
default:
- printk(KERN_ERR "%s in a bad state %d\n",
- __func__, qhp->attr.state);
+ pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
ret = -EINVAL;
goto err;
break;
}
goto out;
err:
- PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
- qhp->wq.sq.qid);
+ pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+ qhp->wq.sq.qid);
/* disassociate the LLP connection */
qhp->attr.llp_stream_handle = NULL;
@@ -1716,7 +1718,7 @@ out:
*/
if (free)
c4iw_put_ep(&ep->com);
- PDBG("%s exit state %d\n", __func__, qhp->attr.state);
+ pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
return ret;
}
@@ -1746,7 +1748,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
c4iw_qp_rem_ref(ib_qp);
- PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+ pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
return 0;
}
@@ -1765,7 +1767,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
- PDBG("%s ib_pd %p\n", __func__, pd);
+ pr_debug("%s ib_pd %p\n", __func__, pd);
if (attrs->qp_type != IB_QPT_RC)
return ERR_PTR(-EINVAL);
@@ -1936,11 +1938,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
INIT_LIST_HEAD(&qhp->db_fc_entry);
- PDBG("%s sq id %u size %u memsize %zu num_entries %u "
- "rq id %u size %u memsize %zu num_entries %u\n", __func__,
- qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
- attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
- qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
+ pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
+ __func__,
+ qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
+ attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
+ qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
return &qhp->ibqp;
err8:
kfree(ma_sync_key_mm);
@@ -1970,7 +1972,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum c4iw_qp_attr_mask mask = 0;
struct c4iw_qp_attributes attrs;
- PDBG("%s ib_qp %p\n", __func__, ibqp);
+ pr_debug("%s ib_qp %p\n", __func__, ibqp);
/* iwarp does not support the RTR state */
if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -2016,7 +2018,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
- PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+ pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 67df71a7012e..8ff0cbe5cb16 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table)
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
- PDBG("%s entry 0x%x\n", __func__, entry);
+ pr_debug("%s entry 0x%x\n", __func__, entry);
c4iw_id_free(id_table, entry);
}
@@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
}
out:
mutex_unlock(&uctx->lock);
- PDBG("%s qid 0x%x\n", __func__, qid);
+ pr_debug("%s qid 0x%x\n", __func__, qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
- PDBG("%s qid 0x%x\n", __func__, qid);
+ pr_debug("%s qid 0x%x\n", __func__, qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->cqids);
@@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
}
out:
mutex_unlock(&uctx->lock);
- PDBG("%s qid 0x%x\n", __func__, qid);
+ pr_debug("%s qid 0x%x\n", __func__, qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
- PDBG("%s qid 0x%x\n", __func__, qid);
+ pr_debug("%s qid 0x%x\n", __func__, qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids);
@@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
- PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
@@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+ pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -290,19 +290,17 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
while (pbl_start < pbl_top) {
pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
- PDBG("%s failed to add PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
+ pr_debug("%s failed to add PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
- printk(KERN_WARNING MOD
- "Failed to add all PBL chunks (%x/%x)\n",
- pbl_start,
- pbl_top - pbl_start);
+ pr_warn("Failed to add all PBL chunks (%x/%x)\n",
+ pbl_start, pbl_top - pbl_start);
return 0;
}
pbl_chunk >>= 1;
} else {
- PDBG("%s added PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
+ pr_debug("%s added PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
pbl_start += pbl_chunk;
}
}
@@ -324,9 +322,9 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
- PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+ pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
if (!addr)
- pr_warn_ratelimited(MOD "%s: Out of RQT memory\n",
+ pr_warn_ratelimited("%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev));
mutex_lock(&rdev->stats.lock);
if (addr) {
@@ -341,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+ pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
mutex_lock(&rdev->stats.lock);
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -363,18 +361,17 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
while (rqt_start < rqt_top) {
rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
- PDBG("%s failed to add RQT chunk (%x/%x)\n",
- __func__, rqt_start, rqt_chunk);
+ pr_debug("%s failed to add RQT chunk (%x/%x)\n",
+ __func__, rqt_start, rqt_chunk);
if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
- printk(KERN_WARNING MOD
- "Failed to add all RQT chunks (%x/%x)\n",
- rqt_start, rqt_top - rqt_start);
+ pr_warn("Failed to add all RQT chunks (%x/%x)\n",
+ rqt_start, rqt_top - rqt_start);
return 0;
}
rqt_chunk >>= 1;
} else {
- PDBG("%s added RQT chunk (%x/%x)\n",
- __func__, rqt_start, rqt_chunk);
+ pr_debug("%s added RQT chunk (%x/%x)\n",
+ __func__, rqt_start, rqt_chunk);
rqt_start += rqt_chunk;
}
}
@@ -394,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
- PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
if (addr) {
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
@@ -407,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+ pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -429,18 +426,17 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
while (start < top) {
chunk = min(top - start + 1, chunk);
if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
- PDBG("%s failed to add OCQP chunk (%x/%x)\n",
- __func__, start, chunk);
+ pr_debug("%s failed to add OCQP chunk (%x/%x)\n",
+ __func__, start, chunk);
if (chunk <= 1024 << MIN_OCQP_SHIFT) {
- printk(KERN_WARNING MOD
- "Failed to add all OCQP chunks (%x/%x)\n",
- start, top - start);
+ pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
+ start, top - start);
return 0;
}
chunk >>= 1;
} else {
- PDBG("%s added OCQP chunk (%x/%x)\n",
- __func__, start, chunk);
+ pr_debug("%s added OCQP chunk (%x/%x)\n",
+ __func__, start, chunk);
start += chunk;
}
}
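All three pool constructors in resource.c (PBL, RQT, OCQP) share the same seeding strategy: offer gen_pool_add() the largest chunk available, halve the chunk size on failure, and give up once chunks fall below a floor. A standalone, simplified simulation of that loop, with a toy pool_add() standing in for gen_pool_add() and round numbers in place of the MIN_*_SHIFT floors:

#include <stdio.h>

static int pool_add(unsigned long start, unsigned long len)
{
	/* stand-in for gen_pool_add(); pretend chunks over 4 KiB fail */
	return len > 4096 ? -1 : 0;
}

int main(void)
{
	unsigned long start = 0, top = 64 * 1024, chunk = 16 * 1024;

	while (start < top) {
		if (chunk > top - start)
			chunk = top - start;
		if (pool_add(start, chunk)) {
			if (chunk <= 1024) {
				printf("failed to add all chunks (%lx/%lx)\n",
				       start, top - start);
				return 0;
			}
			chunk >>= 1;    /* halve and retry */
		} else {
			start += chunk; /* advance past the added chunk */
		}
	}
	printf("pool seeded\n");
	return 0;
}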
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 640d22148a3e..e765c00303cd 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -466,14 +466,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
wmb();
if (wq->sq.bar2_va) {
if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
- PDBG("%s: WC wq->sq.pidx = %d\n",
- __func__, wq->sq.pidx);
+ pr_debug("%s: WC wq->sq.pidx = %d\n",
+ __func__, wq->sq.pidx);
pio_copy((u64 __iomem *)
(wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
(u64 *)wqe);
} else {
- PDBG("%s: DB wq->sq.pidx = %d\n",
- __func__, wq->sq.pidx);
+ pr_debug("%s: DB wq->sq.pidx = %d\n",
+ __func__, wq->sq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
wq->sq.bar2_va + SGE_UDB_KDOORBELL);
}
@@ -493,14 +493,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
wmb();
if (wq->rq.bar2_va) {
if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
- PDBG("%s: WC wq->rq.pidx = %d\n",
- __func__, wq->rq.pidx);
+ pr_debug("%s: WC wq->rq.pidx = %d\n",
+ __func__, wq->rq.pidx);
pio_copy((u64 __iomem *)
(wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
(void *)wqe);
} else {
- PDBG("%s: DB wq->rq.pidx = %d\n",
- __func__, wq->rq.pidx);
+ pr_debug("%s: DB wq->rq.pidx = %d\n",
+ __func__, wq->rq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
wq->rq.bar2_va + SGE_UDB_KDOORBELL);
}
@@ -601,7 +601,8 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
{
cq->sw_in_use++;
if (cq->sw_in_use == cq->size) {
- PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+ pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
+ __func__, cq->cqid);
cq->error = 1;
BUG_ON(1);
}
@@ -656,7 +657,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
ret = -EOVERFLOW;
cq->error = 1;
- printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+ pr_err("cq overflow cqid %u\n", cq->cqid);
BUG_ON(1);
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
@@ -672,7 +673,8 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
if (cq->sw_in_use == cq->size) {
- PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+ pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
+ __func__, cq->cqid);
cq->error = 1;
BUG_ON(1);
return NULL;